Merge branch 'master' into feature/aggs-refactoring

# Conflicts:
#	core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java
#	core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java
#	core/src/main/java/org/elasticsearch/search/SearchModule.java
Author: Colin Goodheart-Smithe
Date: 2016-01-25 09:05:51 +00:00
Commit: cd8320b171
724 changed files with 13658 additions and 10693 deletions


@ -372,6 +372,7 @@ class BuildPlugin implements Plugin<Project> {
systemProperty 'tests.artifact', project.name
systemProperty 'tests.task', path
systemProperty 'tests.security.manager', 'true'
systemProperty 'jna.nosys', 'true'
// default test sysprop values
systemProperty 'tests.ifNoTests', 'fail'
systemProperty 'es.logger.level', 'WARN'


@ -112,9 +112,6 @@ public class PluginBuildPlugin extends BuildPlugin {
include 'config/**'
include 'bin/**'
}
from('src/site') {
include '_site/**'
}
}
project.assemble.dependsOn(bundle)


@ -36,15 +36,9 @@ class PluginPropertiesExtension {
@Input
String description
@Input
boolean jvm = true
@Input
String classname
@Input
boolean site = false
@Input
boolean isolated = true


@ -51,11 +51,11 @@ class PluginPropertiesTask extends Copy {
if (extension.description == null) {
throw new InvalidUserDataException('description is a required setting for esplugin')
}
if (extension.jvm && extension.classname == null) {
throw new InvalidUserDataException('classname is a required setting for esplugin with jvm=true')
if (extension.classname == null) {
throw new InvalidUserDataException('classname is a required setting for esplugin')
}
doFirst {
if (extension.jvm && extension.isolated == false) {
if (extension.isolated == false) {
String warning = "WARNING: Disabling plugin isolation in ${project.path} is deprecated and will be removed in the future"
logger.warn("${'=' * warning.length()}\n${warning}\n${'=' * warning.length()}")
}
@ -74,10 +74,8 @@ class PluginPropertiesTask extends Copy {
'version': extension.version,
'elasticsearchVersion': VersionProperties.elasticsearch,
'javaVersion': project.targetCompatibility as String,
'jvm': extension.jvm as String,
'site': extension.site as String,
'isolated': extension.isolated as String,
'classname': extension.jvm ? extension.classname : 'NA'
'classname': extension.classname
]
}
}


@ -57,12 +57,10 @@ class ClusterConfiguration {
@Input
Closure waitCondition = { NodeInfo node, AntBuilder ant ->
File tmpFile = new File(node.cwd, 'wait.success')
ant.echo(message: "[${LocalDateTime.now()}] Waiting for elasticsearch node ${node.httpUri()}", level: "info")
ant.get(src: "http://${node.httpUri()}",
ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}",
dest: tmpFile.toString(),
ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
retries: 10)
ant.echo(message: "[${LocalDateTime.now()}] Finished waiting for elasticsearch node ${node.httpUri()}. Reachable? ${tmpFile.exists()}", level: "info")
return tmpFile.exists()
}
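The wait condition above simply polls the cluster health API until the expected number of nodes has joined. A minimal standalone sketch of the same strategy in plain Java (host, port, node count, and timeouts are assumptions, not values taken from this build):

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

public class WaitForCluster {
    public static void main(String[] args) throws Exception {
        int numNodes = 2; // assumption: the expected cluster size
        URL health = new URL("http://localhost:9200/_cluster/health?wait_for_nodes=" + numNodes);
        for (int attempt = 0; attempt < 10; attempt++) { // mirrors retries: 10 above
            try {
                HttpURLConnection conn = (HttpURLConnection) health.openConnection();
                conn.setConnectTimeout(1000);
                conn.setReadTimeout(30000); // the health API blocks until wait_for_nodes is satisfied or it times out
                if (conn.getResponseCode() == 200) {
                    System.out.println("cluster reachable with " + numNodes + " nodes");
                    return;
                }
            } catch (IOException e) {
                // node not up yet; swallow and retry, like ignoreerrors: true above
            }
            Thread.sleep(500);
        }
        throw new IllegalStateException("cluster did not become available");
    }
}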


@ -46,9 +46,9 @@ class ClusterFormationTasks {
/**
* Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
*
* Returns an object that will resolve at execution time of the given task to a uri for the cluster.
* Returns a NodeInfo object for the first node in the cluster.
*/
static Object setup(Project project, Task task, ClusterConfiguration config) {
static NodeInfo setup(Project project, Task task, ClusterConfiguration config) {
if (task.getEnabled() == false) {
// no need to add cluster formation tasks if the task won't run!
return
@ -66,7 +66,7 @@ class ClusterFormationTasks {
task.dependsOn(wait)
// delay the resolution of the uri by wrapping in a closure, so it is not used until read for tests
return "${-> nodes[0].transportUri()}"
return nodes[0]
}
/** Adds a dependency on the given distribution */


@ -129,7 +129,7 @@ class NodeInfo {
'JAVA_HOME' : project.javaHome,
'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
]
args.add("-Des.tests.portsfile=true")
args.add("-Des.node.portsfile=true")
args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
for (Map.Entry<String, String> property : System.properties.entrySet()) {
if (property.getKey().startsWith('es.')) {


@ -20,7 +20,6 @@ package org.elasticsearch.gradle.test
import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.GradleException
import org.gradle.api.Task
import org.gradle.api.internal.tasks.options.Option
import org.gradle.api.plugins.JavaBasePlugin
@ -61,8 +60,12 @@ public class RestIntegTestTask extends RandomizedTestingTask {
// this must run after all projects have been configured, so we know any project
// references can be accessed as fully configured projects
project.gradle.projectsEvaluated {
Object clusterUri = ClusterFormationTasks.setup(project, this, clusterConfig)
systemProperty('tests.cluster', clusterUri)
NodeInfo node = ClusterFormationTasks.setup(project, this, clusterConfig)
systemProperty('tests.rest.cluster', "${-> node.httpUri()}")
// TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
// that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
// both as separate sysprops
systemProperty('tests.cluster', "${-> node.transportUri()}")
}
}


@ -41,6 +41,7 @@ public class StandaloneTestPlugin implements Plugin<Project> {
]
RandomizedTestingTask test = project.tasks.create(testOptions)
test.configure(BuildPlugin.commonTestConfig(project))
BuildPlugin.configureCompile(project)
test.classpath = project.sourceSets.test.runtimeClasspath
test.testClassesDir project.sourceSets.test.output.classesDir
test.mustRunAfter(project.precommit)


@ -2,26 +2,13 @@
# This file must exist as 'plugin-descriptor.properties' at
# the root directory of all plugins.
#
# A plugin can be 'site', 'jvm', or both.
#
### example site plugin for "foo":
#
# foo.zip <-- zip file for the plugin, with this structure:
# _site/ <-- the contents that will be served
# plugin-descriptor.properties <-- example contents below:
#
# site=true
# description=My cool plugin
# version=1.0
#
### example jvm plugin for "foo"
### example plugin for "foo"
#
# foo.zip <-- zip file for the plugin, with this structure:
# <arbitrary name1>.jar <-- classes, resources, dependencies
# <arbitrary nameN>.jar <-- any number of jars
# plugin-descriptor.properties <-- example contents below:
#
# jvm=true
# classname=foo.bar.BazPlugin
# description=My cool plugin
# version=2.0
@ -38,21 +25,6 @@ version=${version}
#
# 'name': the plugin name
name=${name}
### mandatory elements for site plugins:
#
# 'site': set to true to indicate contents of the _site/
# directory in the root of the plugin should be served.
site=${site}
#
### mandatory elements for jvm plugins :
#
# 'jvm': true if the 'classname' class should be loaded
# from jar files in the root directory of the plugin.
# Note that only jar files in the root directory are
# added to the classpath for the plugin! If you need
# other resources, package them into a resources jar.
jvm=${jvm}
#
# 'classname': the name of the class to load, fully-qualified.
classname=${classname}
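The class named by 'classname' is what gets loaded from the plugin's jars. As a hedged illustration, the example descriptor above (classname=foo.bar.BazPlugin) would pair with something like the following, assuming the Plugin base class's abstract name() and description() methods from this codebase:

package foo.bar;

import org.elasticsearch.plugins.Plugin;

public class BazPlugin extends Plugin {
    @Override
    public String name() {
        return "foo"; // should line up with the 'name' property in the descriptor
    }

    @Override
    public String description() {
        return "My cool plugin"; // mirrors the 'description' property
    }
}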


@ -1,5 +1,5 @@
elasticsearch = 3.0.0-SNAPSHOT
lucene = 5.5.0-snapshot-1721183
lucene = 5.5.0-snapshot-1725675
# optional dependencies
spatial4j = 0.5


@ -200,7 +200,7 @@ public class ActionModule extends AbstractModule {
private final Map<String, ActionEntry> actions = new HashMap<>();
private final List<Class<? extends ActionFilter>> actionFilters = new ArrayList<>();
static class ActionEntry<Request extends ActionRequest, Response extends ActionResponse> {
static class ActionEntry<Request extends ActionRequest<Request>, Response extends ActionResponse> {
public final GenericAction<Request, Response> action;
public final Class<? extends TransportAction<Request, Response>> transportAction;
public final Class[] supportTransportActions;
@ -229,7 +229,7 @@ public class ActionModule extends AbstractModule {
* @param <Request> The request type.
* @param <Response> The response type.
*/
public <Request extends ActionRequest, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
actions.put(action.name(), new ActionEntry<>(action, transportAction, supportTransportActions));
}


@ -22,14 +22,9 @@ package org.elasticsearch.action.admin.cluster.settings;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import static org.elasticsearch.cluster.ClusterState.builder;
/**
@ -57,11 +52,11 @@ final class SettingsUpdater {
boolean changed = false;
Settings.Builder transientSettings = Settings.settingsBuilder();
transientSettings.put(currentState.metaData().transientSettings());
changed |= apply(transientToApply, transientSettings, transientUpdates, "transient");
changed |= clusterSettings.updateDynamicSettings(transientToApply, transientSettings, transientUpdates, "transient");
Settings.Builder persistentSettings = Settings.settingsBuilder();
persistentSettings.put(currentState.metaData().persistentSettings());
changed |= apply(persistentToApply, persistentSettings, persistentUpdates, "persistent");
changed |= clusterSettings.updateDynamicSettings(persistentToApply, persistentSettings, persistentUpdates, "persistent");
if (!changed) {
return currentState;
@ -86,42 +81,5 @@ final class SettingsUpdater {
return build;
}
private boolean apply(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
boolean changed = false;
final Set<String> toRemove = new HashSet<>();
Settings.Builder settingsBuilder = Settings.settingsBuilder();
for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
if (entry.getValue() == null) {
toRemove.add(entry.getKey());
} else if (clusterSettings.isLoggerSetting(entry.getKey()) || clusterSettings.hasDynamicSetting(entry.getKey())) {
settingsBuilder.put(entry.getKey(), entry.getValue());
updates.put(entry.getKey(), entry.getValue());
changed = true;
} else {
throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
}
}
changed |= applyDeletes(toRemove, target);
target.put(settingsBuilder.build());
return changed;
}
private final boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
boolean changed = false;
for (String entry : deletes) {
Set<String> keysToRemove = new HashSet<>();
Set<String> keySet = builder.internalMap().keySet();
for (String key : keySet) {
if (Regex.simpleMatch(entry, key)) {
keysToRemove.add(key);
}
}
for (String key : keysToRemove) {
builder.remove(key);
changed = true;
}
}
return changed;
}
}


@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.indices.refresh;
import org.elasticsearch.action.ReplicationResponse;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.BasicReplicationRequest;
import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -37,7 +37,7 @@ import java.util.List;
/**
* Refresh action.
*/
public class TransportRefreshAction extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, ReplicationRequest, ReplicationResponse> {
public class TransportRefreshAction extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, BasicReplicationRequest, ReplicationResponse> {
@Inject
public TransportRefreshAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
@ -53,8 +53,8 @@ public class TransportRefreshAction extends TransportBroadcastReplicationAction<
}
@Override
protected ReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
return new ReplicationRequest(request, shardId);
protected BasicReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
return new BasicReplicationRequest(request, shardId);
}
@Override


@ -21,7 +21,7 @@ package org.elasticsearch.action.admin.indices.refresh;
import org.elasticsearch.action.ReplicationResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.BasicReplicationRequest;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
@ -41,7 +41,7 @@ import org.elasticsearch.transport.TransportService;
/**
*
*/
public class TransportShardRefreshAction extends TransportReplicationAction<ReplicationRequest, ReplicationRequest, ReplicationResponse> {
public class TransportShardRefreshAction extends TransportReplicationAction<BasicReplicationRequest, BasicReplicationRequest, ReplicationResponse> {
public static final String NAME = RefreshAction.NAME + "[s]";
@ -51,7 +51,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
actionFilters, indexNameExpressionResolver, ReplicationRequest::new, ReplicationRequest::new, ThreadPool.Names.REFRESH);
actionFilters, indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
}
@Override
@ -60,7 +60,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
}
@Override
protected Tuple<ReplicationResponse, ReplicationRequest> shardOperationOnPrimary(MetaData metaData, ReplicationRequest shardRequest) throws Throwable {
protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) throws Throwable {
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
indexShard.refresh("api");
logger.trace("{} refresh request executed on primary", indexShard.shardId());
@ -68,7 +68,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
}
@Override
protected void shardOperationOnReplica(ReplicationRequest request) {
protected void shardOperationOnReplica(BasicReplicationRequest request) {
final ShardId shardId = request.shardId();
IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
indexShard.refresh("api");


@ -62,7 +62,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
if (globalBlock != null) {
return globalBlock;
}
if (request.settings().getAsMap().size() == 1 && (request.settings().get(IndexMetaData.SETTING_BLOCKS_METADATA) != null || request.settings().get(IndexMetaData.SETTING_READ_ONLY) != null )) {
if (request.settings().getAsMap().size() == 1 && (IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings()) || IndexMetaData.INDEX_READ_ONLY_SETTING.exists(request.settings()))) {
return null;
}
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));


@ -25,9 +25,11 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -38,13 +40,15 @@ import org.elasticsearch.transport.TransportService;
public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<PutIndexTemplateRequest, PutIndexTemplateResponse> {
private final MetaDataIndexTemplateService indexTemplateService;
private final IndexScopedSettings indexScopedSettings;
@Inject
public TransportPutIndexTemplateAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, MetaDataIndexTemplateService indexTemplateService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexScopedSettings indexScopedSettings) {
super(settings, PutIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutIndexTemplateRequest::new);
this.indexTemplateService = indexTemplateService;
this.indexScopedSettings = indexScopedSettings;
}
@Override
@ -69,11 +73,13 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
if (cause.length() == 0) {
cause = "api";
}
final Settings.Builder templateSettingsBuilder = Settings.settingsBuilder();
templateSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
indexScopedSettings.validate(templateSettingsBuilder);
indexTemplateService.putTemplate(new MetaDataIndexTemplateService.PutRequest(cause, request.name())
.template(request.template())
.order(request.order())
.settings(request.settings())
.settings(templateSettingsBuilder.build())
.mappings(request.mappings())
.aliases(request.aliases())
.customs(request.customs())


@ -179,7 +179,7 @@ public class BulkProcessor implements Closeable {
private final ScheduledThreadPoolExecutor scheduler;
private final ScheduledFuture scheduledFuture;
private final ScheduledFuture<?> scheduledFuture;
private final AtomicLong executionIdGen = new AtomicLong();
@ -250,24 +250,24 @@ public class BulkProcessor implements Closeable {
* (for example, if no id is provided, one will be generated, or usage of the create flag).
*/
public BulkProcessor add(IndexRequest request) {
return add((ActionRequest) request);
return add((ActionRequest<?>) request);
}
/**
* Adds an {@link DeleteRequest} to the list of actions to execute.
*/
public BulkProcessor add(DeleteRequest request) {
return add((ActionRequest) request);
return add((ActionRequest<?>) request);
}
/**
* Adds either a delete or an index request.
*/
public BulkProcessor add(ActionRequest request) {
public BulkProcessor add(ActionRequest<?> request) {
return add(request, null);
}
public BulkProcessor add(ActionRequest request, @Nullable Object payload) {
public BulkProcessor add(ActionRequest<?> request, @Nullable Object payload) {
internalAdd(request, payload);
return this;
}
@ -282,7 +282,7 @@ public class BulkProcessor implements Closeable {
}
}
private synchronized void internalAdd(ActionRequest request, @Nullable Object payload) {
private synchronized void internalAdd(ActionRequest<?> request, @Nullable Object payload) {
ensureOpen();
bulkRequest.add(request, payload);
executeIfNeeded();
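All of the typed add(...) overloads above funnel into the single add(ActionRequest<?>, payload) entry point. A hedged usage sketch (assuming the BulkProcessor.builder API from this codebase; index, type, and id values are made up):

import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;

public class BulkDemo {
    static void bulkIndex(Client client) {
        BulkProcessor processor = BulkProcessor.builder(client, new BulkProcessor.Listener() {
            @Override public void beforeBulk(long executionId, BulkRequest request) { }
            @Override public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { }
            @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) { }
        }).setBulkActions(1000).build();

        // the typed overloads below both delegate to add(ActionRequest<?>, null)
        processor.add(new IndexRequest("my-index", "my-type").source("{\"field\":1}"));
        processor.add(new DeleteRequest("my-index", "my-type", "1"));
        processor.close(); // flushes any queued requests
    }
}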


@ -56,7 +56,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
private static final int REQUEST_OVERHEAD = 50;
final List<ActionRequest> requests = new ArrayList<>();
final List<ActionRequest<?>> requests = new ArrayList<>();
List<Object> payloads = null;
protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT;
@ -72,21 +72,21 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
* Creates a bulk request caused by some other request, which is provided as an
* argument so that its headers and context can be copied to the new request
*/
public BulkRequest(ActionRequest request) {
public BulkRequest(ActionRequest<?> request) {
super(request);
}
/**
* Adds a list of requests to be executed. Either index or delete requests.
*/
public BulkRequest add(ActionRequest... requests) {
for (ActionRequest request : requests) {
public BulkRequest add(ActionRequest<?>... requests) {
for (ActionRequest<?> request : requests) {
add(request, null);
}
return this;
}
public BulkRequest add(ActionRequest request) {
public BulkRequest add(ActionRequest<?> request) {
return add(request, null);
}
@ -96,7 +96,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
* @param payload Optional payload
* @return the current bulk request
*/
public BulkRequest add(ActionRequest request, @Nullable Object payload) {
public BulkRequest add(ActionRequest<?> request, @Nullable Object payload) {
if (request instanceof IndexRequest) {
add((IndexRequest) request, payload);
} else if (request instanceof DeleteRequest) {
@ -112,8 +112,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
/**
* Adds a list of requests to be executed. Either index or delete requests.
*/
public BulkRequest add(Iterable<ActionRequest> requests) {
for (ActionRequest request : requests) {
public BulkRequest add(Iterable<ActionRequest<?>> requests) {
for (ActionRequest<?> request : requests) {
add(request);
}
return this;
@ -196,15 +196,14 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
/**
* The list of requests in this bulk request.
*/
public List<ActionRequest> requests() {
public List<ActionRequest<?>> requests() {
return this.requests;
}
@Override
@SuppressWarnings("unchecked")
public List<? extends IndicesRequest> subRequests() {
List<IndicesRequest> indicesRequests = new ArrayList<>();
for (ActionRequest request : requests) {
for (ActionRequest<?> request : requests) {
assert request instanceof IndicesRequest;
indicesRequests.add((IndicesRequest) request);
}
@ -486,7 +485,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
if (requests.isEmpty()) {
validationException = addValidationError("no requests added", validationException);
}
for (ActionRequest request : requests) {
for (ActionRequest<?> request : requests) {
// We first check if refresh has been set
if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) ||
(request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) ||
@ -535,7 +534,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
super.writeTo(out);
out.writeByte(consistencyLevel.id());
out.writeVInt(requests.size());
for (ActionRequest request : requests) {
for (ActionRequest<?> request : requests) {
if (request instanceof IndexRequest) {
out.writeByte((byte) 0);
} else if (request instanceof DeleteRequest) {


@ -52,10 +52,6 @@ public class PercolateShardRequest extends BroadcastShardRequest {
this.startTime = request.startTime;
}
public PercolateShardRequest(ShardId shardId, OriginalIndices originalIndices) {
super(shardId, originalIndices);
}
PercolateShardRequest(ShardId shardId, PercolateRequest request) {
super(shardId, request);
this.documentType = request.documentType();


@ -23,7 +23,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.single.shard.SingleShardRequest;
@ -109,7 +108,7 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
}
public static class Request extends SingleShardRequest implements IndicesRequest {
public static class Request extends SingleShardRequest<Request> implements IndicesRequest {
private int shardId;
private String preference;
@ -160,12 +159,8 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
items = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
int slot = in.readVInt();
OriginalIndices originalIndices = OriginalIndices.readOriginalIndices(in);
PercolateShardRequest shardRequest = new PercolateShardRequest(new ShardId(index, shardId), originalIndices);
shardRequest.documentType(in.readString());
shardRequest.source(in.readBytesReference());
shardRequest.docSource(in.readBytesReference());
shardRequest.onlyCount(in.readBoolean());
PercolateShardRequest shardRequest = new PercolateShardRequest();
shardRequest.readFrom(in);
Item item = new Item(slot, shardRequest);
items.add(item);
}
@ -179,11 +174,7 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
out.writeVInt(items.size());
for (Item item : items) {
out.writeVInt(item.slot);
OriginalIndices.writeOriginalIndices(item.request.originalIndices(), out);
out.writeString(item.request.documentType());
out.writeBytesReference(item.request.source());
out.writeBytesReference(item.request.docSource());
out.writeBoolean(item.request.onlyCount());
item.request.writeTo(out);
}
}
@ -245,7 +236,7 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
shardResponse.readFrom(in);
items.add(new Item(slot, shardResponse));
} else {
items.add(new Item(slot, (Throwable)in.readThrowable()));
items.add(new Item(slot, in.readThrowable()));
}
}
}


@ -40,13 +40,15 @@ public interface ActionFilter {
* Enables filtering the execution of an action on the request side, either by sending a response through the
* {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain}
*/
void apply(Task task, String action, ActionRequest request, ActionListener listener, ActionFilterChain chain);
<Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(Task task, String action, Request request,
ActionListener<Response> listener, ActionFilterChain<Request, Response> chain);
/**
* Enables filtering the execution of an action on the response side, either by sending a response through the
* {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain}
*/
void apply(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain);
<Response extends ActionResponse> void apply(String action, Response response, ActionListener<Response> listener,
ActionFilterChain<?, Response> chain);
/**
* A simple base class for injectable action filters that spares the implementation from handling the
@ -60,7 +62,8 @@ public interface ActionFilter {
}
@Override
public final void apply(Task task, String action, ActionRequest request, ActionListener listener, ActionFilterChain chain) {
public final <Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(Task task, String action, Request request,
ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
if (apply(action, request, listener)) {
chain.proceed(task, action, request, listener);
}
@ -70,10 +73,11 @@ public interface ActionFilter {
* Applies this filter and returns {@code true} if the execution chain should proceed, or {@code false}
* if it should be aborted since the filter already handled the request and called the given listener.
*/
protected abstract boolean apply(String action, ActionRequest request, ActionListener listener);
protected abstract boolean apply(String action, ActionRequest<?> request, ActionListener<?> listener);
@Override
public final void apply(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain) {
public final <Response extends ActionResponse> void apply(String action, Response response, ActionListener<Response> listener,
ActionFilterChain<?, Response> chain) {
if (apply(action, response, listener)) {
chain.proceed(action, response, listener);
}
@ -83,6 +87,6 @@ public interface ActionFilter {
* Applies this filter and returns {@code true} if the execution chain should proceed, or {@code false}
* if it should be aborted since the filter already handled the response by calling the given listener.
*/
protected abstract boolean apply(String action, ActionResponse response, ActionListener listener);
protected abstract boolean apply(String action, ActionResponse response, ActionListener<?> listener);
}
}
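A hedged sketch of a pass-through filter written against the new generic signatures (PassthroughActionFilter is a hypothetical name, and order() is assumed from the part of the interface this hunk does not show):

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.action.support.ActionFilterChain;
import org.elasticsearch.tasks.Task;

public class PassthroughActionFilter implements ActionFilter {

    @Override
    public int order() {
        return 0; // assumption: order() sorts filters and is not shown in this hunk
    }

    @Override
    public <Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(
            Task task, String action, Request request,
            ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
        // no response has been sent, so continue the request-side chain
        chain.proceed(task, action, request, listener);
    }

    @Override
    public <Response extends ActionResponse> void apply(
            String action, Response response, ActionListener<Response> listener,
            ActionFilterChain<?, Response> chain) {
        // likewise continue the response-side chain
        chain.proceed(action, response, listener);
    }
}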


@ -27,17 +27,17 @@ import org.elasticsearch.tasks.Task;
/**
* A filter chain allowing to continue and process the transport action request
*/
public interface ActionFilterChain {
public interface ActionFilterChain<Request extends ActionRequest<Request>, Response extends ActionResponse> {
/**
* Continue processing the request. Should only be called if a response has not been sent through
* the given {@link ActionListener listener}
*/
void proceed(Task task, final String action, final ActionRequest request, final ActionListener listener);
void proceed(Task task, final String action, final Request request, final ActionListener<Response> listener);
/**
* Continue processing the response. Should only be called if a response has not been sent through
* the given {@link ActionListener listener}
*/
void proceed(final String action, final ActionResponse response, final ActionListener listener);
void proceed(final String action, final Response response, final ActionListener<Response> listener);
}


@ -44,7 +44,7 @@ public final class AutoCreateIndex {
@Inject
public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) {
this.resolver = resolver;
dynamicMappingDisabled = !settings.getAsBoolean(MapperService.INDEX_MAPPER_DYNAMIC_SETTING, MapperService.INDEX_MAPPER_DYNAMIC_DEFAULT);
dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings);
String value = settings.get("action.auto_create_index");
if (value == null || Booleans.isExplicitTrue(value)) {
needToCheck = true;


@ -34,8 +34,8 @@ import java.util.function.Supplier;
/**
* A TransportAction that self registers a handler into the transport service
*/
public abstract class HandledTransportAction<Request extends ActionRequest, Response extends ActionResponse> extends TransportAction<Request,Response>{
public abstract class HandledTransportAction<Request extends ActionRequest<Request>, Response extends ActionResponse>
extends TransportAction<Request, Response> {
protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler());


@ -40,7 +40,7 @@ import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
/**
*
*/
public abstract class TransportAction<Request extends ActionRequest, Response extends ActionResponse> extends AbstractComponent {
public abstract class TransportAction<Request extends ActionRequest<Request>, Response extends ActionResponse> extends AbstractComponent {
protected final ThreadPool threadPool;
protected final String actionName;
@ -66,7 +66,7 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
return future;
}
public final void execute(Request request, ActionListener<Response> listener) {
public final Task execute(Request request, ActionListener<Response> listener) {
Task task = taskManager.register("transport", actionName, request);
if (task == null) {
execute(null, request, listener);
@ -85,6 +85,7 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
}
});
}
return task;
}
private final void execute(Task task, Request request, ActionListener<Response> listener) {
@ -103,7 +104,7 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
listener.onFailure(t);
}
} else {
RequestFilterChain requestFilterChain = new RequestFilterChain<>(this, logger);
RequestFilterChain<Request, Response> requestFilterChain = new RequestFilterChain<>(this, logger);
requestFilterChain.proceed(task, actionName, request, listener);
}
}
@ -114,7 +115,8 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
protected abstract void doExecute(Request request, ActionListener<Response> listener);
private static class RequestFilterChain<Request extends ActionRequest, Response extends ActionResponse> implements ActionFilterChain {
private static class RequestFilterChain<Request extends ActionRequest<Request>, Response extends ActionResponse>
implements ActionFilterChain<Request, Response> {
private final TransportAction<Request, Response> action;
private final AtomicInteger index = new AtomicInteger();
@ -125,14 +127,15 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
this.logger = logger;
}
@Override @SuppressWarnings("unchecked")
public void proceed(Task task, String actionName, ActionRequest request, ActionListener listener) {
@Override
public void proceed(Task task, String actionName, Request request, ActionListener<Response> listener) {
int i = index.getAndIncrement();
try {
if (i < this.action.filters.length) {
this.action.filters[i].apply(task, actionName, request, listener, this);
} else if (i == this.action.filters.length) {
this.action.doExecute(task, (Request) request, new FilteredActionListener<Response>(actionName, listener, new ResponseFilterChain(this.action.filters, logger)));
this.action.doExecute(task, request, new FilteredActionListener<Response>(actionName, listener,
new ResponseFilterChain<>(this.action.filters, logger)));
} else {
listener.onFailure(new IllegalStateException("proceed was called too many times"));
}
@ -143,12 +146,13 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
}
@Override
public void proceed(String action, ActionResponse response, ActionListener listener) {
public void proceed(String action, Response response, ActionListener<Response> listener) {
assert false : "request filter chain should never be called on the response side";
}
}
private static class ResponseFilterChain implements ActionFilterChain {
private static class ResponseFilterChain<Request extends ActionRequest<Request>, Response extends ActionResponse>
implements ActionFilterChain<Request, Response> {
private final ActionFilter[] filters;
private final AtomicInteger index;
@ -161,12 +165,12 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
}
@Override
public void proceed(Task task, String action, ActionRequest request, ActionListener listener) {
public void proceed(Task task, String action, Request request, ActionListener<Response> listener) {
assert false : "response filter chain should never be called on the request side";
}
@Override @SuppressWarnings("unchecked")
public void proceed(String action, ActionResponse response, ActionListener listener) {
@Override
public void proceed(String action, Response response, ActionListener<Response> listener) {
int i = index.decrementAndGet();
try {
if (i >= 0) {
@ -186,10 +190,10 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
private static class FilteredActionListener<Response extends ActionResponse> implements ActionListener<Response> {
private final String actionName;
private final ActionListener listener;
private final ResponseFilterChain chain;
private final ActionListener<Response> listener;
private final ResponseFilterChain<?, Response> chain;
private FilteredActionListener(String actionName, ActionListener listener, ResponseFilterChain chain) {
private FilteredActionListener(String actionName, ActionListener<Response> listener, ResponseFilterChain<?, Response> chain) {
this.actionName = actionName;
this.listener = listener;
this.chain = chain;


@ -49,7 +49,7 @@ import java.util.function.Supplier;
/**
*
*/
public abstract class TransportBroadcastAction<Request extends BroadcastRequest, Response extends BroadcastResponse, ShardRequest extends BroadcastShardRequest, ShardResponse extends BroadcastShardResponse>
public abstract class TransportBroadcastAction<Request extends BroadcastRequest<Request>, Response extends BroadcastResponse, ShardRequest extends BroadcastShardRequest, ShardResponse extends BroadcastShardResponse>
extends HandledTransportAction<Request, Response> {
protected final ClusterService clusterService;


@ -74,7 +74,7 @@ import java.util.function.Supplier;
* @param <Response> the response to the client request
* @param <ShardOperationResult> per-shard operation results
*/
public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRequest,
public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRequest<Request>,
Response extends BroadcastResponse,
ShardOperationResult extends Streamable> extends HandledTransportAction<Request, Response> {
@ -447,10 +447,12 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
return nodeId;
}
@Override
public String[] indices() {
return indicesLevelRequest.indices();
}
@Override
public IndicesOptions indicesOptions() {
return indicesLevelRequest.indicesOptions();
}


@ -50,7 +50,7 @@ import java.util.function.Supplier;
/**
* A base class for operations that needs to be performed on the master node.
*/
public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest<Request>, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
protected final TransportService transportService;
protected final ClusterService clusterService;


@ -33,7 +33,8 @@ import java.util.function.Supplier;
* A base class for read operations that needs to be performed on the master node.
* Can also be executed on the local node if needed.
*/
public abstract class TransportMasterNodeReadAction<Request extends MasterNodeReadRequest, Response extends ActionResponse> extends TransportMasterNodeAction<Request, Response> {
public abstract class TransportMasterNodeReadAction<Request extends MasterNodeReadRequest<Request>, Response extends ActionResponse>
extends TransportMasterNodeAction<Request, Response> {
public static final String FORCE_LOCAL_SETTING = "action.master.force_local";


@ -33,7 +33,8 @@ import java.util.function.Supplier;
/**
*/
public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequest, Response extends ActionResponse> extends TransportMasterNodeReadAction<Request, Response> {
public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequest<Request>, Response extends ActionResponse>
extends TransportMasterNodeReadAction<Request, Response> {
public TransportClusterInfoAction(Settings settings, String actionName, TransportService transportService,
ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters,


@ -50,7 +50,7 @@ import java.util.function.Supplier;
/**
*
*/
public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest, NodesResponse extends BaseNodesResponse, NodeRequest extends BaseNodeRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction<NodesRequest, NodesResponse> {
public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest<NodesRequest>, NodesResponse extends BaseNodesResponse, NodeRequest extends BaseNodeRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction<NodesRequest, NodesResponse> {
protected final ClusterName clusterName;
protected final ClusterService clusterService;


@ -0,0 +1,59 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.support.replication;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.index.shard.ShardId;
/**
* A replication request that has no more information than ReplicationRequest.
* Unfortunately ReplicationRequest can't be declared as a type parameter
* because it has a self-referential type parameter of its own. So use this
* instead.
*/
public class BasicReplicationRequest extends ReplicationRequest<BasicReplicationRequest> {
public BasicReplicationRequest() {
}
/**
* Creates a new request that inherits headers and context from the request
* provided as argument.
*/
public BasicReplicationRequest(ActionRequest<?> request) {
super(request);
}
/**
* Creates a new request with resolved shard id
*/
public BasicReplicationRequest(ActionRequest<?> request, ShardId shardId) {
super(request, shardId);
}
/**
* Copy constructor that creates a new request that is a copy of the one
* provided as an argument.
*/
protected BasicReplicationRequest(BasicReplicationRequest request) {
super(request);
}
}
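The self-referential ("F-bounded") constraint this javadoc describes can be reproduced in miniature. A standalone sketch with hypothetical names, not the Elasticsearch classes:

abstract class Request<R extends Request<R>> {
    @SuppressWarnings("unchecked")
    final R self() {
        return (R) this; // the self type lets methods return the concrete subclass
    }
}

// Request cannot be its own type argument (raw Request does not satisfy the
// bound R extends Request<R>), so a trivial subclass closes the recursion,
// which is exactly the role BasicReplicationRequest plays above.
final class BasicRequest extends Request<BasicRequest> { }

public class SelfTypeDemo {
    static <R extends Request<R>> R roundTrip(R request) {
        return request.self();
    }

    public static void main(String[] args) {
        BasicRequest r = roundTrip(new BasicRequest());
        System.out.println(r.getClass().getSimpleName()); // prints BasicRequest
    }
}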


@ -38,7 +38,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
*
*/
public class ReplicationRequest<Request extends ReplicationRequest<Request>> extends ActionRequest<Request> implements IndicesRequest {
public abstract class ReplicationRequest<Request extends ReplicationRequest<Request>> extends ActionRequest<Request> implements IndicesRequest {
public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);


@ -20,6 +20,7 @@
package org.elasticsearch.action.support.replication;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ReplicationResponse;
@ -52,7 +53,8 @@ import java.util.function.Supplier;
* Base class for requests that should be executed on all shards of an index or several indices.
* This action sends shard requests to all primary shards of the indices and they are then replicated like write requests
*/
public abstract class TransportBroadcastReplicationAction<Request extends BroadcastRequest, Response extends BroadcastResponse, ShardRequest extends ReplicationRequest, ShardResponse extends ReplicationResponse> extends HandledTransportAction<Request, Response> {
public abstract class TransportBroadcastReplicationAction<Request extends BroadcastRequest<Request>, Response extends BroadcastResponse, ShardRequest extends ReplicationRequest<ShardRequest>, ShardResponse extends ReplicationResponse>
extends HandledTransportAction<Request, Response> {
private final TransportReplicationAction replicatedBroadcastShardAction;
private final ClusterService clusterService;


@ -64,7 +64,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportChannelResponseHandler;
import org.elasticsearch.transport.TransportException;
@ -76,6 +75,7 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
@ -90,9 +90,7 @@ import java.util.function.Supplier;
* primary node to validate request before primary operation followed by sampling state again for resolving
* nodes with replica copies to perform replication.
*/
public abstract class TransportReplicationAction<Request extends ReplicationRequest, ReplicaRequest extends ReplicationRequest, Response extends ReplicationResponse> extends TransportAction<Request, Response> {
public static final String SHARD_FAILURE_TIMEOUT = "action.support.replication.shard.failure_timeout";
public abstract class TransportReplicationAction<Request extends ReplicationRequest<Request>, ReplicaRequest extends ReplicationRequest<ReplicaRequest>, Response extends ReplicationResponse> extends TransportAction<Request, Response> {
protected final TransportService transportService;
protected final ClusterService clusterService;
@ -101,7 +99,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected final WriteConsistencyLevel defaultWriteConsistencyLevel;
protected final TransportRequestOptions transportOptions;
protected final MappingUpdatedAction mappingUpdatedAction;
private final TimeValue shardFailedTimeout;
final String transportReplicaAction;
final String transportPrimaryAction;
@ -133,8 +130,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
this.transportOptions = transportOptions();
this.defaultWriteConsistencyLevel = WriteConsistencyLevel.fromString(settings.get("action.write_consistency", "quorum"));
// TODO: set a default timeout
shardFailedTimeout = settings.getAsTime(SHARD_FAILURE_TIMEOUT, null);
}
@Override
@ -608,7 +603,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
}
replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference, shardFailedTimeout);
replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
} catch (Throwable e) {
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
if (logger.isTraceEnabled()) {
@ -732,15 +727,13 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private final AtomicInteger pending;
private final int totalShards;
private final Releasable indexShardReference;
private final TimeValue shardFailedTimeout;
public ReplicationPhase(ReplicaRequest replicaRequest, Response finalResponse, ShardId shardId,
TransportChannel channel, Releasable indexShardReference, TimeValue shardFailedTimeout) {
TransportChannel channel, Releasable indexShardReference) {
this.replicaRequest = replicaRequest;
this.channel = channel;
this.finalResponse = finalResponse;
this.indexShardReference = indexShardReference;
this.shardFailedTimeout = shardFailedTimeout;
this.shardId = shardId;
// we have to get a new state after successfully indexing into the primary in order to honour recovery semantics.
@ -882,15 +875,32 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
if (ignoreReplicaException(exp)) {
onReplicaFailure(nodeId, exp);
} else {
logger.warn("{} failed to perform {} on node {}", exp, shardId, transportReplicaAction, node);
shardStateAction.shardFailed(clusterService.state(), shard, indexUUID, "failed to perform " + transportReplicaAction + " on replica on node " + node, exp, shardFailedTimeout, new ReplicationFailedShardStateListener(nodeId, exp));
String message = String.format(Locale.ROOT, "failed to perform %s on replica on node %s", transportReplicaAction, node);
logger.warn("{} {}", exp, shardId, message);
shardStateAction.shardFailed(
shard,
indexUUID,
message,
exp,
new ShardStateAction.Listener() {
@Override
public void onSuccess() {
onReplicaFailure(nodeId, exp);
}
@Override
public void onFailure(Throwable t) {
// TODO: handle catastrophic non-channel failures
onReplicaFailure(nodeId, exp);
}
}
);
}
}
}
);
}
void onReplicaFailure(String nodeId, @Nullable Throwable e) {
// Only version conflict should be ignored from being put into the _shards header?
if (e != null && ignoreReplicaException(e) == false) {
@ -955,34 +965,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
}
}
public class ReplicationFailedShardStateListener implements ShardStateAction.Listener {
private final String nodeId;
private Throwable failure;
public ReplicationFailedShardStateListener(String nodeId, Throwable failure) {
this.nodeId = nodeId;
this.failure = failure;
}
@Override
public void onSuccess() {
onReplicaFailure(nodeId, failure);
}
@Override
public void onShardFailedNoMaster() {
onReplicaFailure(nodeId, failure);
}
@Override
public void onShardFailedFailure(DiscoveryNode master, TransportException e) {
if (e instanceof ReceiveTimeoutTransportException) {
logger.trace("timeout sending shard failure to master [{}]", e, master);
}
onReplicaFailure(nodeId, failure);
}
}
}
/**


@ -54,8 +54,8 @@ import java.util.function.Supplier;
/**
*
*/
public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest<Request>, Response extends ActionResponse>
extends HandledTransportAction<Request, Response> {
protected final ClusterService clusterService;
protected final TransportService transportService;


@ -54,7 +54,7 @@ import static org.elasticsearch.action.support.TransportActions.isShardNotAvaila
* the read operation can be performed on other shard copies. Concrete implementations can provide their own list
* of candidate shards to try the read operation on.
*/
public abstract class TransportSingleShardAction<Request extends SingleShardRequest, Response extends ActionResponse> extends TransportAction<Request, Response> {
public abstract class TransportSingleShardAction<Request extends SingleShardRequest<Request>, Response extends ActionResponse> extends TransportAction<Request, Response> {
protected final ClusterService clusterService;


@ -241,26 +241,26 @@ final class Security {
*/
static void addFilePermissions(Permissions policy, Environment environment) {
// read-only dirs
addPath(policy, "path.home", environment.binFile(), "read,readlink");
addPath(policy, "path.home", environment.libFile(), "read,readlink");
addPath(policy, "path.home", environment.modulesFile(), "read,readlink");
addPath(policy, "path.plugins", environment.pluginsFile(), "read,readlink");
addPath(policy, "path.conf", environment.configFile(), "read,readlink");
addPath(policy, "path.scripts", environment.scriptsFile(), "read,readlink");
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.binFile(), "read,readlink");
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink");
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink");
addPath(policy, Environment.PATH_PLUGINS_SETTING.getKey(), environment.pluginsFile(), "read,readlink");
addPath(policy, Environment.PATH_CONF_SETTING.getKey(), environment.configFile(), "read,readlink");
addPath(policy, Environment.PATH_SCRIPTS_SETTING.getKey(), environment.scriptsFile(), "read,readlink");
// read-write dirs
addPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete");
addPath(policy, "path.logs", environment.logsFile(), "read,readlink,write,delete");
addPath(policy, Environment.PATH_LOGS_SETTING.getKey(), environment.logsFile(), "read,readlink,write,delete");
if (environment.sharedDataFile() != null) {
addPath(policy, "path.shared_data", environment.sharedDataFile(), "read,readlink,write,delete");
addPath(policy, Environment.PATH_SHARED_DATA_SETTING.getKey(), environment.sharedDataFile(), "read,readlink,write,delete");
}
for (Path path : environment.dataFiles()) {
addPath(policy, "path.data", path, "read,readlink,write,delete");
addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
}
for (Path path : environment.dataWithClusterFiles()) {
addPath(policy, "path.data", path, "read,readlink,write,delete");
addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
}
for (Path path : environment.repoFiles()) {
addPath(policy, "path.repo", path, "read,readlink,write,delete");
addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
}
if (environment.pidFile() != null) {
// we just need permission to remove the file if it's elsewhere.


@ -40,7 +40,8 @@ public interface ElasticsearchClient {
* @param <RequestBuilder> The request builder type.
* @return A future allowing to get back the response.
*/
<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(final Action<Request, Response, RequestBuilder> action, final Request request);
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
final Action<Request, Response, RequestBuilder> action, final Request request);
/**
* Executes a generic action, denoted by an {@link Action}.
@ -52,7 +53,8 @@ public interface ElasticsearchClient {
* @param <Response> The response type.
* @param <RequestBuilder> The request builder type.
*/
<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
/**
* Prepares a request builder to execute, specified by {@link Action}.
@ -63,7 +65,8 @@ public interface ElasticsearchClient {
* @param <RequestBuilder> The request builder.
* @return The request builder, that can, at a later stage, execute the request.
*/
<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(final Action<Request, Response, RequestBuilder> action);
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
final Action<Request, Response, RequestBuilder> action);
/**
* Returns the threadpool used to execute requests on this client
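A hedged example of calling through this interface, using one concrete Action from this codebase (the index name is made up; prepareExecute would return the corresponding request builder instead):

import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class ExecuteDemo {
    static SearchResponse searchBlocking(ElasticsearchClient client) {
        // execute(action, request) returns a future; actionGet() blocks on it
        SearchRequest request = new SearchRequest("my-index");
        return client.execute(SearchAction.INSTANCE, request).actionGet();
    }
}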


@ -52,7 +52,8 @@ public abstract class FilterClient extends AbstractClient {
}
@Override
protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
in().execute(action, request, listener);
}
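Because every request funnels through doExecute, wrapping a client is a one-method job. A minimal sketch of a hypothetical subclass that logs each action name before delegating (assumes the FilterClient(Client in) constructor of this era):

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.FilterClient;

// a sketch: logs every action name before delegating to the wrapped client
class LoggingClient extends FilterClient {
    LoggingClient(Client in) {
        super(in);
    }

    @Override
    protected <Request extends ActionRequest<Request>, Response extends ActionResponse,
            RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
            Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
        System.out.println("executing " + action.name()); // cross-cutting concern
        super.doExecute(action, request, listener);       // then delegate to in()
    }
}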

View File

@ -56,7 +56,8 @@ public class NodeClient extends AbstractClient {
@SuppressWarnings("unchecked")
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
TransportAction<Request, Response> transportAction = actions.get(action);
if (transportAction == null) {
throw new IllegalStateException("failed to find action [" + action + "] to execute");
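The lookup-then-throw above is a plain registry dispatch. A self-contained sketch with simplified stand-in types (not the real Elasticsearch classes) showing the same failure mode:

import java.util.Collections;
import java.util.Map;

// minimal stand-in types illustrating NodeClient's action lookup
class ActionDispatchSketch {
    interface TransportAction<Req, Resp> {
        Resp execute(Req request);
    }

    @SuppressWarnings("unchecked")
    static <Req, Resp> Resp dispatch(Map<String, TransportAction<?, ?>> actions, String name, Req request) {
        TransportAction<Req, Resp> transportAction = (TransportAction<Req, Resp>) actions.get(name);
        if (transportAction == null) {
            // same failure mode as NodeClient: an unregistered action is a hard error
            throw new IllegalStateException("failed to find action [" + name + "] to execute");
        }
        return transportAction.execute(request);
    }

    public static void main(String[] args) {
        TransportAction<String, String> search = request -> "hits for " + request;
        Map<String, TransportAction<?, ?>> actions = Collections.singletonMap("indices:data/read/search", search);
        System.out.println(dispatch(actions, "indices:data/read/search", "my-index"));
    }
}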

View File

@ -363,12 +363,14 @@ public abstract class AbstractClient extends AbstractComponent implements Client
}
@Override
public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(final Action<Request, Response, RequestBuilder> action) {
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
final Action<Request, Response, RequestBuilder> action) {
return action.newRequestBuilder(this);
}
@Override
public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(Action<Request, Response, RequestBuilder> action, Request request) {
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) {
PlainActionFuture<Response> actionFuture = PlainActionFuture.newFuture();
execute(action, request, actionFuture);
return actionFuture;
@ -378,13 +380,14 @@ public abstract class AbstractClient extends AbstractComponent implements Client
* This is the single execution point of *all* clients.
*/
@Override
public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
headers.applyTo(request);
listener = threadedWrapper.wrap(listener);
doExecute(action, request, listener);
}
protected abstract <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
protected abstract <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
@Override
public ActionFuture<IndexResponse> index(final IndexRequest request) {
@ -821,17 +824,20 @@ public abstract class AbstractClient extends AbstractComponent implements Client
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(Action<Request, Response, RequestBuilder> action, Request request) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) {
return client.execute(action, request);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
client.execute(action, request, listener);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(Action<Request, Response, RequestBuilder> action) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
Action<Request, Response, RequestBuilder> action) {
return client.prepareExecute(action);
}
@ -1178,17 +1184,20 @@ public abstract class AbstractClient extends AbstractComponent implements Client
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(Action<Request, Response, RequestBuilder> action, Request request) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) {
return client.execute(action, request);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
client.execute(action, request, listener);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(Action<Request, Response, RequestBuilder> action) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
Action<Request, Response, RequestBuilder> action) {
return client.prepareExecute(action);
}
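The single-execution-point comment above describes a template method: the final execute applies headers and wraps the listener exactly once, and subclasses only supply doExecute. A stand-alone sketch of that shape with simplified types (not the real Elasticsearch classes):

import java.util.function.Consumer;

abstract class ClientSketch {
    // the single execution point: all public calls funnel through here,
    // so cross-cutting concerns are applied exactly once
    public final void execute(String request, Consumer<String> listener) {
        String decorated = request + " +headers";    // stands in for headers.applyTo(request)
        Consumer<String> wrapped = listener::accept; // stands in for threadedWrapper.wrap(listener)
        doExecute(decorated, wrapped);
    }

    // subclasses decide how the request is actually transported
    protected abstract void doExecute(String request, Consumer<String> listener);
}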

View File

@ -19,6 +19,10 @@
package org.elasticsearch.client.transport;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.elasticsearch.Version;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
@ -36,6 +40,7 @@ import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
@ -54,10 +59,6 @@ import org.elasticsearch.threadpool.ThreadPoolModule;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.netty.NettyTransport;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
/**
@ -81,7 +82,7 @@ public class TransportClient extends AbstractClient {
*/
public static class Builder {
private Settings settings = Settings.EMPTY;
private Settings providedSettings = Settings.EMPTY;
private List<Class<? extends Plugin>> pluginClasses = new ArrayList<>();
/**
@ -95,7 +96,7 @@ public class TransportClient extends AbstractClient {
* The settings to configure the transport client with.
*/
public Builder settings(Settings settings) {
this.settings = settings;
this.providedSettings = settings;
return this;
}
@ -107,27 +108,29 @@ public class TransportClient extends AbstractClient {
return this;
}
private PluginsService newPluginService(final Settings settings) {
final Settings.Builder settingsBuilder = settingsBuilder()
.put(NettyTransport.PING_SCHEDULE, "5s") // enable by default the transport schedule ping interval
.put(InternalSettingsPreparer.prepareSettings(settings))
.put("network.server", false)
.put("node.client", true)
.put(CLIENT_TYPE_SETTING, CLIENT_TYPE);
return new PluginsService(settingsBuilder.build(), null, null, pluginClasses);
}
/**
* Builds a new instance of the transport client.
*/
public TransportClient build() {
Settings settings = InternalSettingsPreparer.prepareSettings(this.settings);
settings = settingsBuilder()
.put(NettyTransport.PING_SCHEDULE, "5s") // enable by default the transport schedule ping interval
.put(settings)
.put("network.server", false)
.put("node.client", true)
.put(CLIENT_TYPE_SETTING, CLIENT_TYPE)
.build();
PluginsService pluginsService = new PluginsService(settings, null, null, pluginClasses);
this.settings = pluginsService.updatedSettings();
final PluginsService pluginsService = newPluginService(providedSettings);
final Settings settings = pluginsService.updatedSettings();
Version version = Version.CURRENT;
final ThreadPool threadPool = new ThreadPool(settings);
final NetworkService networkService = new NetworkService(settings);
final SettingsFilter settingsFilter = new SettingsFilter(settings);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
boolean success = false;
try {
ModulesBuilder modules = new ModulesBuilder();
@ -137,18 +140,18 @@ public class TransportClient extends AbstractClient {
modules.add(pluginModule);
}
modules.add(new PluginsModule(pluginsService));
modules.add(new SettingsModule(this.settings, settingsFilter ));
modules.add(new NetworkModule(networkService, this.settings, true));
modules.add(new ClusterNameModule(this.settings));
modules.add(new SettingsModule(settings, settingsFilter));
modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry));
modules.add(new ClusterNameModule(settings));
modules.add(new ThreadPoolModule(threadPool));
modules.add(new SearchModule() {
modules.add(new SearchModule(settings, namedWriteableRegistry) {
@Override
protected void configure() {
// noop
}
});
modules.add(new ActionModule(true));
modules.add(new CircuitBreakerModule(this.settings));
modules.add(new CircuitBreakerModule(settings));
pluginsService.processModules(modules);
@ -276,7 +279,7 @@ public class TransportClient extends AbstractClient {
}
@Override
protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
proxy.execute(action, request, listener);
}
}
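A usage sketch of the refactored Builder (2.x-era API; the cluster name, host, and port are placeholders):

import java.net.InetAddress;
import java.net.UnknownHostException;

import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;

class TransportClientSketch {
    static TransportClient connect() throws UnknownHostException {
        // the provided settings are run through newPluginService(), so plugins
        // registered via addPlugin() can contribute additional settings
        TransportClient client = TransportClient.builder()
                .settings(Settings.builder().put("cluster.name", "my-cluster").build())
                .build();
        client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));
        return client;
    }
}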

View File

@ -34,6 +34,7 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
@ -101,6 +102,11 @@ public class TransportClientNodesService extends AbstractComponent {
private volatile boolean closed;
public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting("client.transport.ignore_cluster_name", false, false, Setting.Scope.CLUSTER);
@Inject
public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService,
ThreadPool threadPool, Headers headers, Version version) {
@ -111,9 +117,9 @@ public class TransportClientNodesService extends AbstractComponent {
this.minCompatibilityVersion = version.minimumCompatibilityVersion();
this.headers = headers;
this.nodesSamplerInterval = this.settings.getAsTime("client.transport.nodes_sampler_interval", timeValueSeconds(5));
this.pingTimeout = this.settings.getAsTime("client.transport.ping_timeout", timeValueSeconds(5)).millis();
this.ignoreClusterName = this.settings.getAsBoolean("client.transport.ignore_cluster_name", false);
this.nodesSamplerInterval = CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(this.settings);
this.pingTimeout = CLIENT_TRANSPORT_PING_TIMEOUT.get(this.settings).millis();
this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);
if (logger.isDebugEnabled()) {
logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");

View File

@ -23,7 +23,6 @@ import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.IndexTemplateFilter;
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
@ -36,7 +35,6 @@ import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
@ -56,26 +54,12 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.cluster.settings.Validator;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexingSlowLog;
import org.elasticsearch.index.search.stats.SearchSlowLog;
import org.elasticsearch.index.settings.IndexDynamicSettings;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.search.internal.DefaultSearchContext;
import java.util.Arrays;
import java.util.Collections;
@ -106,7 +90,6 @@ public class ClusterModule extends AbstractModule {
SnapshotInProgressAllocationDecider.class));
private final Settings settings;
private final DynamicSettings.Builder indexDynamicSettings = new DynamicSettings.Builder();
private final ExtensionPoint.SelectedType<ShardsAllocator> shardsAllocators = new ExtensionPoint.SelectedType<>("shards_allocator", ShardsAllocator.class);
private final ExtensionPoint.ClassSet<AllocationDecider> allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class);
private final ExtensionPoint.ClassSet<IndexTemplateFilter> indexTemplateFilters = new ExtensionPoint.ClassSet<>("index_template_filter", IndexTemplateFilter.class);
@ -116,9 +99,6 @@ public class ClusterModule extends AbstractModule {
public ClusterModule(Settings settings) {
this.settings = settings;
registerBuiltinIndexSettings();
for (Class<? extends AllocationDecider> decider : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
registerAllocationDecider(decider);
}
@ -126,68 +106,6 @@ public class ClusterModule extends AbstractModule {
registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class);
}
private void registerBuiltinIndexSettings() {
registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE, Validator.EMPTY);
registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.NON_NEGATIVE_INTEGER);
registerIndexDynamicSetting(MergeSchedulerConfig.MAX_MERGE_COUNT, Validator.EMPTY);
registerIndexDynamicSetting(MergeSchedulerConfig.AUTO_THROTTLE, Validator.EMPTY);
registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY);
registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "*", Validator.EMPTY);
registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "*", Validator.EMPTY);
registerIndexDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY);
registerIndexDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, Validator.NON_NEGATIVE_INTEGER);
registerIndexDynamicSetting(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_READ_ONLY, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_BLOCKS_READ, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_BLOCKS_WRITE, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_BLOCKS_METADATA, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_PRIORITY, Validator.NON_NEGATIVE_INTEGER);
registerIndexDynamicSetting(IndicesTTLService.INDEX_TTL_DISABLE_PURGE, Validator.EMPTY);
registerIndexDynamicSetting(IndexSettings.INDEX_REFRESH_INTERVAL, Validator.TIME);
registerIndexDynamicSetting(PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS, Validator.EMPTY);
registerIndexDynamicSetting(IndexSettings.INDEX_GC_DELETES_SETTING, Validator.TIME);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, Validator.TIME);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, Validator.TIME);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, Validator.TIME);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, Validator.TIME);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT, Validator.EMPTY);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL, Validator.EMPTY);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG, Validator.EMPTY);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_REFORMAT, Validator.EMPTY);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL, Validator.EMPTY);
registerIndexDynamicSetting(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, Validator.INTEGER);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, Validator.DOUBLE);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT, Validator.BYTES_SIZE);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, Validator.INTEGER_GTE_2);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT, Validator.INTEGER_GTE_2);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, Validator.BYTES_SIZE);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, Validator.DOUBLE_GTE_2);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, Validator.NON_NEGATIVE_DOUBLE);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_COMPOUND_FORMAT, Validator.EMPTY);
registerIndexDynamicSetting(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE);
registerIndexDynamicSetting(IndexSettings.INDEX_TRANSLOG_DURABILITY, Validator.EMPTY);
registerIndexDynamicSetting(IndicesWarmer.INDEX_WARMER_ENABLED, Validator.EMPTY);
registerIndexDynamicSetting(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, Validator.BOOLEAN);
registerIndexDynamicSetting(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, Validator.TIME);
registerIndexDynamicSetting(DefaultSearchContext.MAX_RESULT_WINDOW, Validator.POSITIVE_INTEGER);
}
public void registerIndexDynamicSetting(String setting, Validator validator) {
indexDynamicSettings.addSetting(setting, validator);
}
public void registerAllocationDecider(Class<? extends AllocationDecider> allocationDecider) {
allocationDeciders.registerExtension(allocationDecider);
}
@ -202,8 +120,6 @@ public class ClusterModule extends AbstractModule {
@Override
protected void configure() {
bind(DynamicSettings.class).annotatedWith(IndexDynamicSettings.class).toInstance(indexDynamicSettings.build());
// bind ShardsAllocator
String shardsAllocatorType = shardsAllocators.bindType(binder(), settings, ClusterModule.SHARDS_ALLOCATOR_TYPE_KEY, ClusterModule.BALANCED_ALLOCATOR);
if (shardsAllocatorType.equals(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR)) {
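With the per-index dynamic settings registry removed, ClusterModule's remaining extension points are class-based. A sketch of how a plugin of this era might contribute an allocation decider (the plugin and decider are hypothetical; deciders inherit ALWAYS decisions by default):

import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;

// illustrative decider: every decision method keeps the AllocationDecider default (ALWAYS)
class MyAllocationDecider extends AllocationDecider {
    @Inject
    public MyAllocationDecider(Settings settings) {
        super(settings);
    }
}

// illustrative plugin wiring via the onModule(...) hook of this era
class MyPlugin extends Plugin {
    @Override
    public String name() {
        return "my-plugin";
    }

    @Override
    public String description() {
        return "registers a custom allocation decider";
    }

    public void onModule(ClusterModule clusterModule) {
        clusterModule.registerAllocationDecider(MyAllocationDecider.class);
    }
}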

View File

@ -406,10 +406,26 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
String nodeId = nodeStats.getNode().id();
String nodeName = nodeStats.getNode().getName();
if (logger.isTraceEnabled()) {
logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}", nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(), leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}",
nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(),
leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
}
if (leastAvailablePath.getTotal().bytes() < 0) {
if (logger.isTraceEnabled()) {
logger.trace("node: [{}] least available path has less than 0 total bytes of disk [{}], skipping",
nodeId, leastAvailablePath.getTotal().bytes());
}
} else {
newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
}
if (mostAvailablePath.getTotal().bytes() < 0) {
if (logger.isTraceEnabled()) {
logger.trace("node: [{}] most available path has less than 0 total bytes of disk [{}], skipping",
nodeId, mostAvailablePath.getTotal().bytes());
}
} else {
newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));
}
newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));
}
}

View File

@ -22,9 +22,11 @@ package org.elasticsearch.cluster.action.shard;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.MasterNodeChangePredicate;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
@ -42,13 +44,16 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.NodeDisconnectedException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
@ -60,55 +65,95 @@ import java.util.Locale;
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
public class ShardStateAction extends AbstractComponent {
public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started";
public static final String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure";
private final TransportService transportService;
private final ClusterService clusterService;
@Inject
public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService,
AllocationService allocationService, RoutingService routingService) {
super(settings);
this.transportService = transportService;
this.clusterService = clusterService;
transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger));
transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardFailedTransportHandler(clusterService, new ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger));
}
public void shardFailed(final ClusterState clusterState, final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
shardFailed(clusterState, shardRouting, indexUUID, message, failure, null, listener);
}
public void resendShardFailed(final ClusterState clusterState, final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
logger.trace("{} re-sending failed shard [{}], index UUID [{}], reason [{}]", shardRouting.shardId(), failure, shardRouting, indexUUID, message);
shardFailed(clusterState, shardRouting, indexUUID, message, failure, listener);
}
public void shardFailed(final ClusterState clusterState, final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, TimeValue timeout, Listener listener) {
DiscoveryNode masterNode = clusterState.nodes().masterNode();
private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardRoutingEntry shardRoutingEntry, final Listener listener) {
DiscoveryNode masterNode = observer.observedState().nodes().masterNode();
if (masterNode == null) {
logger.warn("{} no master known to fail shard [{}]", shardRouting.shardId(), shardRouting);
listener.onShardFailedNoMaster();
return;
}
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, failure);
TransportRequestOptions options = TransportRequestOptions.EMPTY;
if (timeout != null) {
options = TransportRequestOptions.builder().withTimeout(timeout).build();
}
transportService.sendRequest(masterNode,
SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
listener.onSuccess();
}
logger.warn("{} no master known for action [{}] for shard [{}]", shardRoutingEntry.getShardRouting().shardId(), actionName, shardRoutingEntry.getShardRouting());
waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener);
} else {
logger.debug("{} sending [{}] to [{}] for shard [{}]", shardRoutingEntry.getShardRouting().getId(), actionName, masterNode.getId(), shardRoutingEntry);
transportService.sendRequest(masterNode,
actionName, shardRoutingEntry, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
listener.onSuccess();
}
@Override
public void handleException(TransportException exp) {
logger.warn("{} unexpected failure while sending request to [{}] to fail shard [{}]", exp, shardRoutingEntry.shardRouting.shardId(), masterNode, shardRoutingEntry);
listener.onShardFailedFailure(masterNode, exp);
@Override
public void handleException(TransportException exp) {
if (isMasterChannelException(exp)) {
waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener);
} else {
logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard [{}]", exp, shardRoutingEntry.getShardRouting().shardId(), actionName, masterNode, shardRoutingEntry);
listener.onFailure(exp.getCause());
}
}
});
}
}
private static Class[] MASTER_CHANNEL_EXCEPTIONS = new Class[]{
NotMasterException.class,
ConnectTransportException.class,
Discovery.FailedToCommitClusterStateException.class
};
private static boolean isMasterChannelException(TransportException exp) {
return ExceptionsHelper.unwrap(exp, MASTER_CHANNEL_EXCEPTIONS) != null;
}
public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger);
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, failure);
sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardRoutingEntry, listener);
}
public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
logger.trace("{} re-sending failed shard [{}], index UUID [{}], reason [{}]", shardRouting.shardId(), failure, shardRouting, indexUUID, message);
shardFailed(shardRouting, indexUUID, message, failure, listener);
}
// visible for testing
protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardRoutingEntry shardRoutingEntry, Listener listener) {
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
if (logger.isTraceEnabled()) {
logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", shardRoutingEntry.getShardRouting().shardId(), state.prettyPrint(), shardRoutingEntry);
}
});
sendShardAction(actionName, observer, shardRoutingEntry, listener);
}
@Override
public void onClusterServiceClose() {
logger.warn("{} node closed while execution action [{}] for shard [{}]", shardRoutingEntry.failure, shardRoutingEntry.getShardRouting().getId(), actionName, shardRoutingEntry.getShardRouting());
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
// we wait indefinitely for a new master
assert false;
}
}, MasterNodeChangePredicate.INSTANCE);
}
private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
@ -208,21 +253,10 @@ public class ShardStateAction extends AbstractComponent {
}
}
public void shardStarted(final ClusterState clusterState, final ShardRouting shardRouting, String indexUUID, final String reason) {
DiscoveryNode masterNode = clusterState.nodes().masterNode();
if (masterNode == null) {
logger.warn("{} no master known to start shard [{}]", shardRouting.shardId(), shardRouting);
return;
}
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null);
logger.debug("sending start shard [{}]", shardRoutingEntry);
transportService.sendRequest(masterNode,
SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleException(TransportException exp) {
logger.warn("{} failure sending start shard [{}] to [{}]", exp, shardRouting.shardId(), masterNode, shardRouting);
}
});
public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String message, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger);
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, null);
sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardRoutingEntry, listener);
}
private static class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
@ -334,10 +368,23 @@ public class ShardStateAction extends AbstractComponent {
default void onSuccess() {
}
default void onShardFailedNoMaster() {
}
default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {
/**
* Notification for non-channel exceptions that are not handled
* by {@link ShardStateAction}.
*
* The exceptions that are handled by {@link ShardStateAction}
* are:
* - {@link NotMasterException}
* - {@link NodeDisconnectedException}
* - {@link Discovery.FailedToCommitClusterStateException}
*
* Any other exception is communicated to the requester via
* this notification.
*
* @param t the unexpected cause of the failure on the master
*/
default void onFailure(final Throwable t) {
}
}
}
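To tie the new shardFailed signature and the Listener contract together, a usage sketch (the ShardStateAction instance, routing inputs, and message text are assumed to be in scope):

import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.routing.ShardRouting;

class ShardFailedSketch {
    static void notifyMaster(ShardStateAction shardStateAction, ShardRouting shardRouting,
                             String indexUUID, Throwable cause) {
        // "engine failure" is a placeholder reason for illustration
        shardStateAction.shardFailed(shardRouting, indexUUID, "engine failure", cause,
                new ShardStateAction.Listener() {
                    @Override
                    public void onSuccess() {
                        // the master acknowledged and applied the shard-failed task
                    }

                    @Override
                    public void onFailure(Throwable t) {
                        // only non-channel failures land here; master-channel exceptions
                        // are retried internally once a new master is elected
                    }
                });
    }
}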

View File

@ -306,16 +306,16 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
addIndexBlock(indexMetaData.getIndex(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
}
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_READ_ONLY, false)) {
if (IndexMetaData.INDEX_READ_ONLY_SETTING.get(indexMetaData.getSettings())) {
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
}
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, false)) {
if (IndexMetaData.INDEX_BLOCKS_READ_SETTING.get(indexMetaData.getSettings())) {
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_READ_BLOCK);
}
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, false)) {
if (IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.get(indexMetaData.getSettings())) {
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK);
}
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, false)) {
if (IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.get(indexMetaData.getSettings())) {
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK);
}
return this;

View File

@ -0,0 +1,92 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.metadata;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.settings.Setting;
/**
* This class acts as a functional wrapper around the <tt>index.auto_expand_replicas</tt> setting.
* This setting, or rather its value, is expanded into a min and a max value which require special handling
* based on the number of data nodes in the cluster. This class handles all the parsing and streamlines access to these values.
*/
final class AutoExpandReplicas {
// the value we recognize in the "max" position to mean all the nodes
private static final String ALL_NODES_VALUE = "all";
public static final Setting<AutoExpandReplicas> SETTING = new Setting<>(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "false", (value) -> {
final int min;
final int max;
if (Booleans.parseBoolean(value, true) == false) {
return new AutoExpandReplicas(0, 0, false);
}
final int dash = value.indexOf('-');
if (-1 == dash) {
throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash);
}
final String sMin = value.substring(0, dash);
try {
min = Integer.parseInt(sMin);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash, e);
}
String sMax = value.substring(dash + 1);
if (sMax.equals(ALL_NODES_VALUE)) {
max = Integer.MAX_VALUE;
} else {
try {
max = Integer.parseInt(sMax);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash, e);
}
}
return new AutoExpandReplicas(min, max, true);
}, true, Setting.Scope.INDEX);
private final int minReplicas;
private final int maxReplicas;
private final boolean enabled;
private AutoExpandReplicas(int minReplicas, int maxReplicas, boolean enabled) {
if (minReplicas > maxReplicas) {
throw new IllegalArgumentException("[" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] minReplicas must be =< maxReplicas but wasn't " + minReplicas + " > " + maxReplicas);
}
this.minReplicas = minReplicas;
this.maxReplicas = maxReplicas;
this.enabled = enabled;
}
int getMinReplicas() {
return minReplicas;
}
int getMaxReplicas(int numDataNodes) {
return Math.min(maxReplicas, numDataNodes - 1);
}
@Override
public String toString() {
return enabled ? minReplicas + "-" + maxReplicas : "false";
}
boolean isEnabled() {
return enabled;
}
}
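A small parsing sketch for the setting above (the class and its getters are package-private, so this assumes same-package code; the values are illustrative):

package org.elasticsearch.cluster.metadata; // sketch: getters are package-private

import org.elasticsearch.common.settings.Settings;

class AutoExpandReplicasSketch {
    static void demo() {
        Settings settings = Settings.builder()
                .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-all")
                .build();
        AutoExpandReplicas auto = AutoExpandReplicas.SETTING.get(settings);
        assert auto.isEnabled();
        assert auto.getMinReplicas() == 0;
        // "all" parses to Integer.MAX_VALUE, then is capped at numDataNodes - 1
        assert auto.getMaxReplicas(5) == 4;
    }
}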

View File

@ -29,14 +29,17 @@ import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader;
import org.elasticsearch.common.xcontent.FromXContentBuilder;
@ -58,6 +61,7 @@ import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
@ -70,10 +74,6 @@ import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
*/
public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuilder<IndexMetaData>, ToXContent {
public static final IndexMetaData PROTO = IndexMetaData.builder("")
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1).numberOfReplicas(0).build();
public interface Custom extends Diffable<Custom>, ToXContent {
String type();
@ -152,14 +152,29 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
public static final String INDEX_SETTING_PREFIX = "index.";
public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards";
public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, false, Setting.Scope.INDEX);
public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas";
public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, true, Setting.Scope.INDEX);
public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas";
public static final Setting<Boolean> INDEX_SHADOW_REPLICAS_SETTING = Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, false, Setting.Scope.INDEX);
public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem";
public static final Setting<Boolean> INDEX_SHARED_FILESYSTEM_SETTING = Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, false, Setting.Scope.INDEX);
public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas";
public static final Setting<AutoExpandReplicas> INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING;
public static final String SETTING_READ_ONLY = "index.blocks.read_only";
public static final Setting<Boolean> INDEX_READ_ONLY_SETTING = Setting.boolSetting(SETTING_READ_ONLY, false, true, Setting.Scope.INDEX);
public static final String SETTING_BLOCKS_READ = "index.blocks.read";
public static final Setting<Boolean> INDEX_BLOCKS_READ_SETTING = Setting.boolSetting(SETTING_BLOCKS_READ, false, true, Setting.Scope.INDEX);
public static final String SETTING_BLOCKS_WRITE = "index.blocks.write";
public static final Setting<Boolean> INDEX_BLOCKS_WRITE_SETTING = Setting.boolSetting(SETTING_BLOCKS_WRITE, false, true, Setting.Scope.INDEX);
public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata";
public static final Setting<Boolean> INDEX_BLOCKS_METADATA_SETTING = Setting.boolSetting(SETTING_BLOCKS_METADATA, false, true, Setting.Scope.INDEX);
public static final String SETTING_VERSION_CREATED = "index.version.created";
public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string";
public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded";
@ -167,12 +182,23 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible";
public static final String SETTING_CREATION_DATE = "index.creation_date";
public static final String SETTING_PRIORITY = "index.priority";
public static final Setting<Integer> INDEX_PRIORITY_SETTING = Setting.intSetting("index.priority", 1, 0, true, Setting.Scope.INDEX);
public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string";
public static final String SETTING_INDEX_UUID = "index.uuid";
public static final String SETTING_DATA_PATH = "index.data_path";
public static final Setting<String> INDEX_DATA_PATH_SETTING = new Setting<>(SETTING_DATA_PATH, "", Function.identity(), false, Setting.Scope.INDEX);
public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
public static final Setting<Boolean> INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, true, Setting.Scope.INDEX);
public static final String INDEX_UUID_NA_VALUE = "_na_";
public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.require.", true, Setting.Scope.INDEX);
public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.include.", true, Setting.Scope.INDEX);
public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.exclude.", true, Setting.Scope.INDEX);
public static final IndexMetaData PROTO = IndexMetaData.builder("")
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1).numberOfReplicas(0).build();
public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
private final int numberOfShards;
@ -627,10 +653,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return this;
}
public long creationDate() {
return settings.getAsLong(SETTING_CREATION_DATE, -1l);
}
public Builder settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
@ -645,11 +667,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return mappings.get(type);
}
public Builder removeMapping(String mappingType) {
mappings.remove(mappingType);
return this;
}
public Builder putMapping(String type, String source) throws IOException {
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
putMapping(new MappingMetaData(type, parser.mapOrdered()));
@ -692,24 +709,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return this;
}
public Builder removeCustom(String type) {
this.customs.remove(type);
return this;
}
public Custom getCustom(String type) {
return this.customs.get(type);
}
public Builder putActiveAllocationIds(int shardId, Set<String> allocationIds) {
activeAllocationIds.put(shardId, new HashSet(allocationIds));
return this;
}
public Set<String> getActiveAllocationIds(int shardId) {
return activeAllocationIds.get(shardId);
}
public long version() {
return this.version;
}
@ -758,22 +762,21 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
filledActiveAllocationIds.put(i, Collections.emptySet());
}
}
Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
final Map<String, String> requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(settings).getAsMap();
final DiscoveryNodeFilters requireFilters;
if (requireMap.isEmpty()) {
requireFilters = null;
} else {
requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
}
Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
Map<String, String> includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.get(settings).getAsMap();
final DiscoveryNodeFilters includeFilters;
if (includeMap.isEmpty()) {
includeFilters = null;
} else {
includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
}
Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
Map<String, String> excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.get(settings).getAsMap();
final DiscoveryNodeFilters excludeFilters;
if (excludeMap.isEmpty()) {
excludeFilters = null;
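The require/include/exclude lookups above read group settings, which return the matching keys with the group prefix stripped. A minimal sketch using the same groupSetting API declared earlier in this hunk (the attribute key and value are illustrative):

import java.util.Map;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

class GroupSettingSketch {
    static final Setting<Settings> REQUIRE_GROUP =
            Setting.groupSetting("index.routing.allocation.require.", true, Setting.Scope.INDEX);

    static void demo() {
        Settings settings = Settings.builder()
                .put("index.routing.allocation.require.zone", "us-east-1a")
                .build();
        // the returned Settings are keyed without the group prefix: {zone=us-east-1a}
        Map<String, String> requireMap = REQUIRE_GROUP.get(settings).getAsMap();
        assert "us-east-1a".equals(requireMap.get("zone"));
    }
}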

View File

@ -47,6 +47,7 @@ import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
@ -103,13 +104,14 @@ public class MetaDataCreateIndexService extends AbstractComponent {
private final IndexTemplateFilter indexTemplateFilter;
private final Environment env;
private final NodeServicesProvider nodeServicesProvider;
private final IndexScopedSettings indexScopedSettings;
@Inject
public MetaDataCreateIndexService(Settings settings, ClusterService clusterService,
IndicesService indicesService, AllocationService allocationService,
Version version, AliasValidator aliasValidator,
Set<IndexTemplateFilter> indexTemplateFilters, Environment env, NodeServicesProvider nodeServicesProvider) {
Set<IndexTemplateFilter> indexTemplateFilters, Environment env, NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings) {
super(settings);
this.clusterService = clusterService;
this.indicesService = indicesService;
@ -118,6 +120,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
this.aliasValidator = aliasValidator;
this.env = env;
this.nodeServicesProvider = nodeServicesProvider;
this.indexScopedSettings = indexScopedSettings;
if (indexTemplateFilters.isEmpty()) {
this.indexTemplateFilter = DEFAULT_INDEX_TEMPLATE_FILTER;
@ -174,6 +177,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
public void createIndex(final CreateIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
Settings.Builder updatedSettingsBuilder = Settings.settingsBuilder();
updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
indexScopedSettings.validate(updatedSettingsBuilder);
request.settings(updatedSettingsBuilder.build());
clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]",
@ -313,7 +317,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
// first, add the default mapping
if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
try {
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), false, request.updateAllTypes());
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
} catch (Exception e) {
removalReason = "failed on parsing default mapping on index creation";
throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, MapperService.DEFAULT_MAPPING, e.getMessage());
@ -325,7 +329,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
try {
// apply the default here, its the first time we parse it
mapperService.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), true, request.updateAllTypes());
mapperService.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
} catch (Exception e) {
removalReason = "failed on parsing mappings on index creation";
throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
@ -460,16 +464,17 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
List<String> getIndexSettingsValidationErrors(Settings settings) {
String customPath = settings.get(IndexMetaData.SETTING_DATA_PATH, null);
String customPath = IndexMetaData.INDEX_DATA_PATH_SETTING.get(settings);
List<String> validationErrors = new ArrayList<>();
if (customPath != null && env.sharedDataFile() == null) {
if (Strings.isEmpty(customPath) == false && env.sharedDataFile() == null) {
validationErrors.add("path.shared_data must be set in order to use custom data paths");
} else if (customPath != null) {
} else if (Strings.isEmpty(customPath) == false) {
Path resolvedPath = PathUtils.get(new Path[]{env.sharedDataFile()}, customPath);
if (resolvedPath == null) {
validationErrors.add("custom path [" + customPath + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]");
}
}
//norelease - this can be removed?
Integer number_of_primaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
Integer number_of_replicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null);
if (number_of_primaries != null && number_of_primaries <= 0) {

View File

@ -104,12 +104,9 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
// temporarily create the index and add mappings so we can parse the filter
try {
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, false);
}
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
}
} catch (Exception e) {
logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.getIndex());

View File

@ -20,12 +20,15 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.misc.IndexMergeTool;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperService;
@ -33,6 +36,7 @@ import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import static java.util.Collections.unmodifiableSet;
@ -49,11 +53,13 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet;
public class MetaDataIndexUpgradeService extends AbstractComponent {
private final MapperRegistry mapperRegistry;
private final IndexScopedSettings indexScopedSettings;
@Inject
public MetaDataIndexUpgradeService(Settings settings, MapperRegistry mapperRegistry) {
public MetaDataIndexUpgradeService(Settings settings, MapperRegistry mapperRegistry, IndexScopedSettings indexScopedSettings) {
super(settings);
this.mapperRegistry = mapperRegistry;
this.indexScopedSettings = indexScopedSettings;
}
/**
@ -66,22 +72,25 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData) {
// Throws an exception if there are too-old segments:
if (isUpgraded(indexMetaData)) {
assert indexMetaData == archiveBrokenIndexSettings(indexMetaData) : "all settings must have been upgraded before";
return indexMetaData;
}
checkSupportedVersion(indexMetaData);
IndexMetaData newMetaData = indexMetaData;
newMetaData = addDefaultUnitsIfNeeded(newMetaData);
// we have to run this first, otherwise we try to create IndexSettings
// with broken settings and fail in checkMappingsCompatibility
newMetaData = archiveBrokenIndexSettings(newMetaData);
// only run the check with the upgraded settings!!
checkMappingsCompatibility(newMetaData);
newMetaData = markAsUpgraded(newMetaData);
return newMetaData;
return markAsUpgraded(newMetaData);
}
/**
* Checks if the index was already opened by this version of Elasticsearch and doesn't require any additional checks.
*/
private boolean isUpgraded(IndexMetaData indexMetaData) {
return indexMetaData.getUpgradedVersion().onOrAfter(Version.V_3_0_0);
boolean isUpgraded(IndexMetaData indexMetaData) {
return indexMetaData.getUpgradedVersion().onOrAfter(Version.CURRENT);
}
/**
@ -113,103 +122,6 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
return false;
}
/** All known byte-sized settings for an index. */
public static final Set<String> INDEX_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
"index.merge.policy.floor_segment",
"index.merge.policy.max_merged_segment",
"index.merge.policy.max_merge_size",
"index.merge.policy.min_merge_size",
"index.shard.recovery.file_chunk_size",
"index.shard.recovery.translog_size",
"index.store.throttle.max_bytes_per_sec",
"index.translog.flush_threshold_size",
"index.translog.fs.buffer_size",
"index.version_map_size"));
/** All known time settings for an index. */
public static final Set<String> INDEX_TIME_SETTINGS = unmodifiableSet(newHashSet(
"index.gateway.wait_for_mapping_update_post_recovery",
"index.shard.wait_for_mapping_update_post_recovery",
"index.gc_deletes",
"index.indexing.slowlog.threshold.index.debug",
"index.indexing.slowlog.threshold.index.info",
"index.indexing.slowlog.threshold.index.trace",
"index.indexing.slowlog.threshold.index.warn",
"index.refresh_interval",
"index.search.slowlog.threshold.fetch.debug",
"index.search.slowlog.threshold.fetch.info",
"index.search.slowlog.threshold.fetch.trace",
"index.search.slowlog.threshold.fetch.warn",
"index.search.slowlog.threshold.query.debug",
"index.search.slowlog.threshold.query.info",
"index.search.slowlog.threshold.query.trace",
"index.search.slowlog.threshold.query.warn",
"index.shadow.wait_for_initial_commit",
"index.store.stats_refresh_interval",
"index.translog.flush_threshold_period",
"index.translog.interval",
"index.translog.sync_interval",
"index.shard.inactive_time",
UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING));
/**
* Elasticsearch 2.0 requires units on byte/memory and time settings; this method adds the default unit to any such settings that are
* missing units.
*/
private IndexMetaData addDefaultUnitsIfNeeded(IndexMetaData indexMetaData) {
if (indexMetaData.getCreationVersion().before(Version.V_2_0_0_beta1)) {
// TODO: can we somehow only do this *once* for a pre-2.0 index? Maybe we could stuff a "fake marker setting" here? Seems hackish...
// Created lazily if we find any settings that are missing units:
Settings settings = indexMetaData.getSettings();
Settings.Builder newSettings = null;
for(String byteSizeSetting : INDEX_BYTES_SIZE_SETTINGS) {
String value = settings.get(byteSizeSetting);
if (value != null) {
try {
Long.parseLong(value);
} catch (NumberFormatException nfe) {
continue;
}
// It's a naked number that previously would be interpreted as default unit (bytes); now we add it:
logger.warn("byte-sized index setting [{}] with value [{}] is missing units; assuming default units (b) but in future versions this will be a hard error", byteSizeSetting, value);
if (newSettings == null) {
newSettings = Settings.builder();
newSettings.put(settings);
}
newSettings.put(byteSizeSetting, value + "b");
}
}
for(String timeSetting : INDEX_TIME_SETTINGS) {
String value = settings.get(timeSetting);
if (value != null) {
try {
Long.parseLong(value);
} catch (NumberFormatException nfe) {
continue;
}
// It's a naked number that previously would be interpreted as default unit (ms); now we add it:
logger.warn("time index setting [{}] with value [{}] is missing units; assuming default units (ms) but in future versions this will be a hard error", timeSetting, value);
if (newSettings == null) {
newSettings = Settings.builder();
newSettings.put(settings);
}
newSettings.put(timeSetting, value + "ms");
}
}
if (newSettings != null) {
// At least one setting was changed:
return IndexMetaData.builder(indexMetaData)
.version(indexMetaData.getVersion())
.settings(newSettings.build())
.build();
}
}
// No changes:
return indexMetaData;
}
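For reference, the deleted addDefaultUnitsIfNeeded above reduces to one rule: if a byte-size or time setting parses as a bare number, append the legacy default unit ("b" or "ms") and warn. A standalone sketch of that rule, using plain maps instead of the Settings class (helper names are hypothetical):

import java.util.LinkedHashMap;
import java.util.Map;

public class DefaultUnitsSketch {
    // Appends a default unit suffix to values that are bare numbers,
    // mirroring the removed addDefaultUnitsIfNeeded() behaviour.
    static Map<String, String> addDefaultUnit(Map<String, String> settings, String suffix) {
        Map<String, String> out = new LinkedHashMap<>(settings);
        for (Map.Entry<String, String> e : settings.entrySet()) {
            try {
                Long.parseLong(e.getValue());              // bare number => unit is missing
                out.put(e.getKey(), e.getValue() + suffix);
            } catch (NumberFormatException ignored) {
                // already carries a unit (e.g. "60s", "16mb"); leave untouched
            }
        }
        return out;
    }

    public static void main(String[] args) {
        Map<String, String> timeSettings = new LinkedHashMap<>();
        timeSettings.put("index.refresh_interval", "1000"); // becomes "1000ms"
        timeSettings.put("index.gc_deletes", "60s");        // unchanged
        System.out.println(addDefaultUnit(timeSettings, "ms"));
    }
}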
/**
* Checks the mappings for compatibility with the current version
*/
@ -217,14 +129,14 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
try {
// We cannot instantiate real analysis server at this point because the node might not have
// been started yet. However, we don't really need real analyzers at this stage - so we can fake it
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.emptyList());
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings);
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null)) {
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
}
}
}
@ -270,4 +182,39 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
}
}
private static final String ARCHIVED_SETTINGS_PREFIX = "archived.";
IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) {
Settings settings = indexMetaData.getSettings();
Settings.Builder builder = Settings.builder();
boolean changed = false;
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
try {
Setting<?> setting = indexScopedSettigns.get(entry.getKey());
if (setting != null) {
setting.get(settings);
builder.put(entry.getKey(), entry.getValue());
} else {
if (indexScopedSettings.isPrivateSetting(entry.getKey()) || entry.getKey().startsWith(ARCHIVED_SETTINGS_PREFIX)) {
builder.put(entry.getKey(), entry.getValue());
} else {
changed = true;
logger.warn("[{}] found unknown index setting: {} value: {} - archiving", indexMetaData.getIndex(), entry.getKey(), entry.getValue());
// we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
// but we want users to be aware that some of their settings are broken and they can research why and what they need to do to replace them.
builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
}
}
} catch (IllegalArgumentException ex) {
changed = true;
logger.warn("[{}] found invalid index setting: {} value: {} - archiving",ex, indexMetaData.getIndex(), entry.getKey(), entry.getValue());
// we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
// but we want users to be aware that some of their setting sare broken and they can research why and what they need to do to replace them.
builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
}
}
return changed ? IndexMetaData.builder(indexMetaData).settings(builder.build()).build() : indexMetaData;
}
}
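The archiving logic above keeps recognized settings as-is and re-keys unknown or invalid ones under the archived. prefix, so they survive the upgrade while remaining visible to tooling. A self-contained sketch of the core rule, with a plain predicate standing in for IndexScopedSettings (the real method also archives settings whose values fail validation; the sketch keys only on unknown names):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

public class ArchiveSettingsSketch {
    static final String ARCHIVED_PREFIX = "archived.";

    static Map<String, String> archiveBroken(Map<String, String> settings, Predicate<String> isKnownKey) {
        Map<String, String> out = new LinkedHashMap<>();
        for (Map.Entry<String, String> e : settings.entrySet()) {
            if (isKnownKey.test(e.getKey()) || e.getKey().startsWith(ARCHIVED_PREFIX)) {
                out.put(e.getKey(), e.getValue());                   // valid or already archived
            } else {
                out.put(ARCHIVED_PREFIX + e.getKey(), e.getValue()); // flag it, but keep it visible
            }
        }
        return out;
    }

    public static void main(String[] args) {
        Set<String> known = Set.of("index.number_of_replicas");
        Map<String, String> settings = new LinkedHashMap<>();
        settings.put("index.number_of_replicas", "1");
        settings.put("index.bogus_setting", "whatever");
        // -> {index.number_of_replicas=1, archived.index.bogus_setting=whatever}
        System.out.println(archiveBroken(settings, known::contains));
    }
}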

View File

@ -143,7 +143,7 @@ public class MetaDataMappingService extends AbstractComponent {
removeIndex = true;
for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), false, true);
indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
}
}
@ -223,7 +223,7 @@ public class MetaDataMappingService extends AbstractComponent {
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
// add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), false, request.updateAllTypes());
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
}
}
}
@ -303,7 +303,7 @@ public class MetaDataMappingService extends AbstractComponent {
if (existingMapper != null) {
existingSource = existingMapper.mappingSource();
}
DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, true, request.updateAllTypes());
DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
CompressedXContent updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {

View File

@ -30,19 +30,20 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.settings.IndexDynamicSettings;
import org.elasticsearch.index.IndexNotFoundException;
import java.util.ArrayList;
import java.util.HashMap;
@ -59,25 +60,21 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
*/
public class MetaDataUpdateSettingsService extends AbstractComponent implements ClusterStateListener {
// the value we recognize in the "max" position to mean all the nodes
private static final String ALL_NODES_VALUE = "all";
private final ClusterService clusterService;
private final AllocationService allocationService;
private final DynamicSettings dynamicSettings;
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final IndexScopedSettings indexScopedSettings;
@Inject
public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, @IndexDynamicSettings DynamicSettings dynamicSettings, IndexNameExpressionResolver indexNameExpressionResolver) {
public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, IndexScopedSettings indexScopedSettings, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings);
this.clusterService = clusterService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.clusterService.add(this);
this.allocationService = allocationService;
this.dynamicSettings = dynamicSettings;
this.indexScopedSettings = indexScopedSettings;
}
@Override
@ -90,69 +87,43 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
final int dataNodeCount = event.state().nodes().dataNodes().size();
Map<Integer, List<String>> nrReplicasChanged = new HashMap<>();
// we need to do this each time in case it was changed by update settings
for (final IndexMetaData indexMetaData : event.state().metaData()) {
String autoExpandReplicas = indexMetaData.getSettings().get(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS);
if (autoExpandReplicas != null && Booleans.parseBoolean(autoExpandReplicas, true)) { // Booleans only work for false values, just as we want it here
try {
final int min;
final int max;
AutoExpandReplicas autoExpandReplicas = IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(indexMetaData.getSettings());
if (autoExpandReplicas.isEnabled()) {
/*
* we have to expand the number of replicas for this index to at least min and at most max nodes here
* so we bump it up if we have to or reduce it depending on min/max and the number of data nodes.
* If we change the number of replicas we just let the shard allocator do its thing once we've updated it
* since it goes through the index metadata to figure out if something needs to be done anyway. To do that
* we issue a cluster settings update command below, which kicks off a reroute.
*/
final int min = autoExpandReplicas.getMinReplicas();
final int max = autoExpandReplicas.getMaxReplicas(dataNodeCount);
int numberOfReplicas = dataNodeCount - 1;
if (numberOfReplicas < min) {
numberOfReplicas = min;
} else if (numberOfReplicas > max) {
numberOfReplicas = max;
}
// same value, nothing to do there
if (numberOfReplicas == indexMetaData.getNumberOfReplicas()) {
continue;
}
final int dash = autoExpandReplicas.indexOf('-');
if (-1 == dash) {
logger.warn("failed to set [{}] for index [{}], it should be dash delimited [{}]",
IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), autoExpandReplicas);
continue;
}
final String sMin = autoExpandReplicas.substring(0, dash);
try {
min = Integer.parseInt(sMin);
} catch (NumberFormatException e) {
logger.warn("failed to set [{}] for index [{}], minimum value is not a number [{}]",
e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), sMin);
continue;
}
String sMax = autoExpandReplicas.substring(dash + 1);
if (sMax.equals(ALL_NODES_VALUE)) {
max = dataNodeCount - 1;
} else {
try {
max = Integer.parseInt(sMax);
} catch (NumberFormatException e) {
logger.warn("failed to set [{}] for index [{}], maximum value is neither [{}] nor a number [{}]",
e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), ALL_NODES_VALUE, sMax);
continue;
}
if (numberOfReplicas >= min && numberOfReplicas <= max) {
if (!nrReplicasChanged.containsKey(numberOfReplicas)) {
nrReplicasChanged.put(numberOfReplicas, new ArrayList<>());
}
int numberOfReplicas = dataNodeCount - 1;
if (numberOfReplicas < min) {
numberOfReplicas = min;
} else if (numberOfReplicas > max) {
numberOfReplicas = max;
}
// same value, nothing to do there
if (numberOfReplicas == indexMetaData.getNumberOfReplicas()) {
continue;
}
if (numberOfReplicas >= min && numberOfReplicas <= max) {
if (!nrReplicasChanged.containsKey(numberOfReplicas)) {
nrReplicasChanged.put(numberOfReplicas, new ArrayList<String>());
}
nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex());
}
} catch (Exception e) {
logger.warn("[{}] failed to parse auto expand replicas", e, indexMetaData.getIndex());
nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex());
}
}
}
if (nrReplicasChanged.size() > 0) {
// update settings and kick of a reroute (implicit) for them to take effect
for (final Integer fNumberOfReplicas : nrReplicasChanged.keySet()) {
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, fNumberOfReplicas).build();
final List<String> indices = nrReplicasChanged.get(fNumberOfReplicas);
@ -182,42 +153,30 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
}
public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
Settings.Builder updatedSettingsBuilder = Settings.settingsBuilder();
updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
final Settings normalizedSettings = Settings.settingsBuilder().put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
Settings.Builder settingsForClosedIndices = Settings.builder();
Settings.Builder settingsForOpenIndices = Settings.builder();
Settings.Builder skippedSettingsBuilder = Settings.builder();
indexScopedSettings.validate(normalizedSettings);
// never allow to change the number of shards
for (String key : updatedSettingsBuilder.internalMap().keySet()) {
if (key.equals(IndexMetaData.SETTING_NUMBER_OF_SHARDS)) {
for (Map.Entry<String, String> entry : normalizedSettings.getAsMap().entrySet()) {
if (entry.getKey().equals(IndexMetaData.SETTING_NUMBER_OF_SHARDS)) {
listener.onFailure(new IllegalArgumentException("can't change the number of shards for an index"));
return;
}
}
final Settings closeSettings = updatedSettingsBuilder.build();
final Set<String> removedSettings = new HashSet<>();
final Set<String> errors = new HashSet<>();
for (Map.Entry<String, String> setting : updatedSettingsBuilder.internalMap().entrySet()) {
if (!dynamicSettings.hasDynamicSetting(setting.getKey())) {
removedSettings.add(setting.getKey());
Setting setting = indexScopedSettings.get(entry.getKey());
assert setting != null; // we already validated the normalized settings
settingsForClosedIndices.put(entry.getKey(), entry.getValue());
if (setting.isDynamic()) {
settingsForOpenIndices.put(entry.getKey(), entry.getValue());
} else {
String error = dynamicSettings.validateDynamicSetting(setting.getKey(), setting.getValue(), clusterService.state());
if (error != null) {
errors.add("[" + setting.getKey() + "] - " + error);
}
skippedSettingsBuilder.put(entry.getKey(), entry.getValue());
}
}
if (!errors.isEmpty()) {
listener.onFailure(new IllegalArgumentException("can't process the settings: " + errors.toString()));
return;
}
if (!removedSettings.isEmpty()) {
for (String removedSetting : removedSettings) {
updatedSettingsBuilder.remove(removedSetting);
}
}
final Settings openSettings = updatedSettingsBuilder.build();
final Settings skippedSettings = skippedSettingsBuilder.build();
final Settings closedSettings = settingsForClosedIndices.build();
final Settings openSettings = settingsForOpenIndices.build();
clusterService.submitStateUpdateTask("update-settings",
new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@ -245,16 +204,16 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
}
}
if (closeIndices.size() > 0 && closeSettings.get(IndexMetaData.SETTING_NUMBER_OF_REPLICAS) != null) {
if (closeIndices.size() > 0 && closedSettings.get(IndexMetaData.SETTING_NUMBER_OF_REPLICAS) != null) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Can't update [%s] on closed indices [%s] - can leave index in an unopenable state", IndexMetaData.SETTING_NUMBER_OF_REPLICAS,
closeIndices
));
}
if (!removedSettings.isEmpty() && !openIndices.isEmpty()) {
if (!skippedSettings.getAsMap().isEmpty() && !openIndices.isEmpty()) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Can't update non dynamic settings[%s] for open indices [%s]",
removedSettings,
skippedSettings.getAsMap().keySet(),
openIndices
));
}
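The rewritten updateSettings splits the validated request three ways: everything goes to the closed-indices set, dynamic settings additionally go to the open-indices set, and non-dynamic settings are recorded as skipped so the update can be rejected when open indices are targeted. A minimal sketch of that split, with plain maps and a predicate standing in for Setting.isDynamic() (the static/dynamic classification in main is assumed for illustration):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Predicate;

public class SettingsSplitSketch {
    static void split(Map<String, String> requested, Predicate<String> isDynamic,
                      Map<String, String> forClosed, Map<String, String> forOpen,
                      Map<String, String> skipped) {
        for (Map.Entry<String, String> e : requested.entrySet()) {
            forClosed.put(e.getKey(), e.getValue());      // closed indices accept any valid setting
            if (isDynamic.test(e.getKey())) {
                forOpen.put(e.getKey(), e.getValue());    // open indices accept only dynamic ones
            } else {
                skipped.put(e.getKey(), e.getValue());    // grounds for rejection if open indices are targeted
            }
        }
    }

    public static void main(String[] args) {
        Map<String, String> requested = new LinkedHashMap<>();
        requested.put("index.number_of_replicas", "2");   // dynamic
        requested.put("index.codec", "best_compression"); // static (assumed here)
        Map<String, String> closed = new LinkedHashMap<>();
        Map<String, String> open = new LinkedHashMap<>();
        Map<String, String> skipped = new LinkedHashMap<>();
        split(requested, "index.number_of_replicas"::equals, closed, open, skipped);
        System.out.println("open=" + open + " skipped=" + skipped);
    }
}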
@ -267,57 +226,37 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
}
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
Boolean updatedReadOnly = openSettings.getAsBoolean(IndexMetaData.SETTING_READ_ONLY, null);
if (updatedReadOnly != null) {
for (String index : actualIndices) {
if (updatedReadOnly) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK);
}
}
}
Boolean updateMetaDataBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, null);
if (updateMetaDataBlock != null) {
for (String index : actualIndices) {
if (updateMetaDataBlock) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_METADATA_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_METADATA_BLOCK);
}
}
}
Boolean updateWriteBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, null);
if (updateWriteBlock != null) {
for (String index : actualIndices) {
if (updateWriteBlock) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_WRITE_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_WRITE_BLOCK);
}
}
}
Boolean updateReadBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, null);
if (updateReadBlock != null) {
for (String index : actualIndices) {
if (updateReadBlock) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_READ_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_READ_BLOCK);
}
}
}
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_BLOCK, IndexMetaData.INDEX_READ_ONLY_SETTING, openSettings);
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_METADATA_BLOCK, IndexMetaData.INDEX_BLOCKS_METADATA_SETTING, openSettings);
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_WRITE_BLOCK, IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, openSettings);
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings);
if (!openIndices.isEmpty()) {
String[] indices = openIndices.toArray(new String[openIndices.size()]);
metaDataBuilder.updateSettings(openSettings, indices);
for (String index : openIndices) {
IndexMetaData indexMetaData = metaDataBuilder.get(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
Settings.Builder updates = Settings.builder();
Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index)) {
metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings));
}
}
}
if (!closeIndices.isEmpty()) {
String[] indices = closeIndices.toArray(new String[closeIndices.size()]);
metaDataBuilder.updateSettings(closeSettings, indices);
for (String index : closeIndices) {
IndexMetaData indexMetaData = metaDataBuilder.get(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
Settings.Builder updates = Settings.builder();
Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index)) {
metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings));
}
}
}
@ -326,12 +265,34 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
// now, reroute in case things change that require it (like number of replicas)
RoutingAllocation.Result routingResult = allocationService.reroute(updatedState, "settings update");
updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
for (String index : openIndices) {
indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings());
}
for (String index : closeIndices) {
indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings());
}
return updatedState;
}
});
}
/**
* Updates the cluster block only if the setting exists in the given settings
*/
private static void maybeUpdateClusterBlock(String[] actualIndices, ClusterBlocks.Builder blocks, ClusterBlock block, Setting<Boolean> setting, Settings openSettings) {
if (setting.exists(openSettings)) {
final boolean updateReadBlock = setting.get(openSettings);
for (String index : actualIndices) {
if (updateReadBlock) {
blocks.addIndexBlock(index, block);
} else {
blocks.removeIndexBlock(index, block);
}
}
}
}
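maybeUpdateClusterBlock collapses the four near-identical add-or-remove-block loops deleted above into one helper gated on whether the setting was actually supplied. The shape of that refactor, sketched with plain collections (an Optional plays the role of Setting.exists(openSettings); all names are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeSet;

public class BlockToggleSketch {
    // Adds or removes `block` for every index, but only when the caller
    // actually supplied a value for the controlling setting.
    static void maybeToggle(String[] indices, Map<String, Set<String>> blocks,
                            String block, Optional<Boolean> requested) {
        if (requested.isEmpty()) {
            return;                                    // setting absent: leave blocks untouched
        }
        for (String index : indices) {
            Set<String> set = blocks.computeIfAbsent(index, k -> new TreeSet<>());
            if (requested.get()) {
                set.add(block);
            } else {
                set.remove(block);
            }
        }
    }

    public static void main(String[] args) {
        Map<String, Set<String>> blocks = new HashMap<>();
        maybeToggle(new String[] { "idx1" }, blocks, "read_only", Optional.of(true));
        maybeToggle(new String[] { "idx1" }, blocks, "write", Optional.empty()); // no-op
        System.out.println(blocks); // {idx1=[read_only]}
    }
}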
public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {

View File

@ -319,7 +319,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
throw new IllegalArgumentException("resolved [" + node + "] into [" + resolvedNodeIds.length + "] nodes, where expected to be resolved to a single node");
}
if (resolvedNodeIds.length == 0) {
throw new IllegalArgumentException("failed to resolve [" + node + " ], no matching nodes");
throw new IllegalArgumentException("failed to resolve [" + node + "], no matching nodes");
}
return nodes.get(resolvedNodeIds[0]);
}

View File

@ -114,6 +114,16 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
return shard;
}
/**
* All shards for the provided {@link ShardId}
* @return All the shard routing entries for the given index and shard id
* @throws IndexNotFoundException if provided index does not exist
* @throws ShardNotFoundException if provided shard id is unknown
*/
public IndexShardRoutingTable shardRoutingTable(ShardId shardId) {
return shardRoutingTable(shardId.getIndex(), shardId.getId());
}
public RoutingTable validateRaiseException(MetaData metaData) throws RoutingValidationException {
RoutingTableValidation validation = validate(metaData);
if (!validation.valid()) {

View File

@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
@ -41,10 +42,10 @@ import java.io.IOException;
public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime");
public static final String INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = "index.unassigned.node_left.delayed_timeout";
private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1);
public static final Setting<TimeValue> INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, true, Setting.Scope.INDEX);
/**
* Reason why the shard is in unassigned state.
* <p>
@ -215,7 +216,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
if (reason != Reason.NODE_LEFT) {
return 0;
}
TimeValue delayTimeout = indexSettings.getAsTime(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, settings.getAsTime(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, DEFAULT_DELAYED_NODE_LEFT_TIMEOUT));
TimeValue delayTimeout = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings, settings);
return Math.max(0l, delayTimeout.nanos());
}
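INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING is now a typed Setting<TimeValue> whose two-argument get(indexSettings, settings) reads the index-level value first, then the node-level value, then the default. A minimal model of that lookup chain follows; the generic class is hypothetical, and real TimeValue parsing also accepts unit suffixes like "5s", which this sketch does not:

import java.util.Map;
import java.util.function.Function;

public class FallbackSettingSketch<T> {
    final String key;
    final T defaultValue;
    final Function<String, T> parser;

    FallbackSettingSketch(String key, T defaultValue, Function<String, T> parser) {
        this.key = key;
        this.defaultValue = defaultValue;
        this.parser = parser;
    }

    // Index-level value wins, then node-level, then the default.
    T get(Map<String, String> indexSettings, Map<String, String> nodeSettings) {
        String raw = indexSettings.getOrDefault(key, nodeSettings.get(key));
        return raw == null ? defaultValue : parser.apply(raw);
    }

    public static void main(String[] args) {
        FallbackSettingSketch<Long> delayedMillis = new FallbackSettingSketch<>(
                "index.unassigned.node_left.delayed_timeout", 60_000L, Long::parseLong);
        System.out.println(delayedMillis.get(Map.of(), Map.of()));                          // 60000 (default)
        System.out.println(delayedMillis.get(Map.of(delayedMillis.key, "5000"), Map.of())); // 5000 (index wins)
    }
}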

View File

@ -32,7 +32,7 @@ import org.elasticsearch.gateway.GatewayAllocator;
/**
* The {@link ShardsAllocator} class offers methods for allocating shards within a cluster.
* These methods include moving shards and re-balancing the cluster. It also allows management
* of shards by their state.
* of shards by their state.
*/
public class ShardsAllocators extends AbstractComponent implements ShardsAllocator {

View File

@ -0,0 +1,240 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.StreamableReader;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import java.util.function.Consumer;
/**
* Abstract base class for allocating an unassigned shard to a node
*/
public abstract class AbstractAllocateAllocationCommand implements AllocationCommand, ToXContent {
private static final String INDEX_KEY = "index";
private static final String SHARD_KEY = "shard";
private static final String NODE_KEY = "node";
protected static <T extends Builder> ObjectParser<T, Void> createAllocateParser(String command) {
ObjectParser<T, Void> parser = new ObjectParser<>(command);
parser.declareString(Builder::setIndex, new ParseField(INDEX_KEY));
parser.declareInt(Builder::setShard, new ParseField(SHARD_KEY));
parser.declareString(Builder::setNode, new ParseField(NODE_KEY));
return parser;
}
protected static abstract class Builder<T extends AbstractAllocateAllocationCommand> implements StreamableReader<Builder<T>> {
protected String index;
protected int shard = -1;
protected String node;
public void setIndex(String index) {
this.index = index;
}
public void setShard(int shard) {
this.shard = shard;
}
public void setNode(String node) {
this.node = node;
}
@Override
public Builder<T> readFrom(StreamInput in) throws IOException {
index = in.readString();
shard = in.readVInt();
node = in.readString();
return this;
}
public abstract Builder<T> parse(XContentParser parser) throws IOException;
public abstract T build();
protected void validate() {
if (index == null) {
throw new IllegalArgumentException("Argument [index] must be defined");
}
if (shard < 0) {
throw new IllegalArgumentException("Argument [shard] must be defined and non-negative");
}
if (node == null) {
throw new IllegalArgumentException("Argument [node] must be defined");
}
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.field(INDEX_KEY, shardId().index().name());
builder.field(SHARD_KEY, shardId().id());
builder.field(NODE_KEY, node());
return builder;
}
public void writeTo(StreamOutput out) throws IOException {
out.writeString(shardId.getIndex());
out.writeVInt(shardId.getId());
out.writeString(node);
}
public static abstract class Factory<T extends AbstractAllocateAllocationCommand> implements AllocationCommand.Factory<T> {
protected abstract Builder<T> newBuilder();
@Override
public T readFrom(StreamInput in) throws IOException {
return newBuilder().readFrom(in).build();
}
@Override
public void writeTo(T command, StreamOutput out) throws IOException {
command.writeTo(out);
}
@Override
public T fromXContent(XContentParser parser) throws IOException {
return newBuilder().parse(parser).build();
}
@Override
public void toXContent(T command, XContentBuilder builder, ToXContent.Params params, String objectName) throws IOException {
if (objectName == null) {
builder.startObject();
} else {
builder.startObject(objectName);
}
command.toXContent(builder, params); // delegate to the command so the index/shard/node fields are emitted
builder.endObject();
}
}
protected final ShardId shardId;
protected final String node;
protected AbstractAllocateAllocationCommand(ShardId shardId, String node) {
this.shardId = shardId;
this.node = node;
}
/**
* Get the shard id
*
* @return id of the shard
*/
public ShardId shardId() {
return this.shardId;
}
/**
* Get the id of the node
*
* @return id of the node
*/
public String node() {
return this.node;
}
/**
* Handle case where a disco node cannot be found in the routing table. Usually means that it's not a data node.
*/
protected RerouteExplanation explainOrThrowMissingRoutingNode(RoutingAllocation allocation, boolean explain, DiscoveryNode discoNode) {
if (!discoNode.dataNode()) {
return explainOrThrowRejectedCommand(explain, allocation, "allocation can only be done on data nodes, not [" + node + "]");
} else {
return explainOrThrowRejectedCommand(explain, allocation, "could not find [" + node + "] among the routing nodes");
}
}
/**
* Utility method for rejecting the current allocation command based on provided reason
*/
protected RerouteExplanation explainOrThrowRejectedCommand(boolean explain, RoutingAllocation allocation, String reason) {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, name() + " (allocation command)", reason));
}
throw new IllegalArgumentException("[" + name() + "] " + reason);
}
/**
* Utility method for rejecting the current allocation command based on provided exception
*/
protected RerouteExplanation explainOrThrowRejectedCommand(boolean explain, RoutingAllocation allocation, RuntimeException rte) {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, name() + " (allocation command)", rte.getMessage()));
}
throw rte;
}
/**
* Initializes an unassigned shard on a node and removes it from the unassigned list
*
* @param allocation the allocation
* @param routingNodes the routing nodes
* @param routingNode the node to initialize it to
* @param shardRouting the shard routing that is to be matched in unassigned shards
*/
protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode, ShardRouting shardRouting) {
initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, null);
}
/**
* Initializes an unassigned shard on a node and removes it from the unassigned list
*
* @param allocation the allocation
* @param routingNodes the routing nodes
* @param routingNode the node to initialize it to
* @param shardRouting the shard routing that is to be matched in unassigned shards
* @param shardRoutingChanges changes to apply for shard routing in unassigned shards before initialization
*/
protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode,
ShardRouting shardRouting, @Nullable Consumer<ShardRouting> shardRoutingChanges) {
for (RoutingNodes.UnassignedShards.UnassignedIterator it = routingNodes.unassigned().iterator(); it.hasNext(); ) {
ShardRouting unassigned = it.next();
if (!unassigned.equalsIgnoringMetaData(shardRouting)) {
continue;
}
if (shardRoutingChanges != null) {
shardRoutingChanges.accept(unassigned);
}
it.initialize(routingNode.nodeId(), unassigned.version(),
allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
return;
}
assert false : "shard to initialize not found in list of unassigned shards";
}
}
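All three new allocation commands share the Builder flow defined above: ObjectParser binds the index/shard/node fields declaratively, then validate() runs before build(). A compact, self-contained sketch of that flow, with illustrative names and no ObjectParser dependency (the real build() returns the concrete command rather than a String):

public class AllocateBuilderSketch {
    static class Builder {
        String index;
        int shard = -1;
        String node;

        Builder setIndex(String index) { this.index = index; return this; }
        Builder setShard(int shard)    { this.shard = shard; return this; }
        Builder setNode(String node)   { this.node = node;   return this; }

        void validate() {
            if (index == null) throw new IllegalArgumentException("Argument [index] must be defined");
            if (shard < 0)     throw new IllegalArgumentException("Argument [shard] must be defined and non-negative");
            if (node == null)  throw new IllegalArgumentException("Argument [node] must be defined");
        }

        String build() {
            validate();
            return index + "[" + shard + "] -> " + node;
        }
    }

    public static void main(String[] args) {
        System.out.println(new Builder().setIndex("logs").setShard(0).setNode("node-1").build());
    }
}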

View File

@ -1,240 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
/**
* Allocates an unassigned shard to a specific node. Note, primary allocation will "force"
* allocation which might mean one will lose data if using local gateway..., use with care
* with the <tt>allowPrimary</tt> flag.
*/
public class AllocateAllocationCommand implements AllocationCommand {
public static final String NAME = "allocate";
public static class Factory implements AllocationCommand.Factory<AllocateAllocationCommand> {
@Override
public AllocateAllocationCommand readFrom(StreamInput in) throws IOException {
return new AllocateAllocationCommand(ShardId.readShardId(in), in.readString(), in.readBoolean());
}
@Override
public void writeTo(AllocateAllocationCommand command, StreamOutput out) throws IOException {
command.shardId().writeTo(out);
out.writeString(command.node());
out.writeBoolean(command.allowPrimary());
}
@Override
public AllocateAllocationCommand fromXContent(XContentParser parser) throws IOException {
String index = null;
int shardId = -1;
String nodeId = null;
boolean allowPrimary = false;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if ("index".equals(currentFieldName)) {
index = parser.text();
} else if ("shard".equals(currentFieldName)) {
shardId = parser.intValue();
} else if ("node".equals(currentFieldName)) {
nodeId = parser.text();
} else if ("allow_primary".equals(currentFieldName) || "allowPrimary".equals(currentFieldName)) {
allowPrimary = parser.booleanValue();
} else {
throw new ElasticsearchParseException("[{}] command does not support field [{}]", NAME, currentFieldName);
}
} else {
throw new ElasticsearchParseException("[{}] command does not support complex json tokens [{}]", NAME, token);
}
}
if (index == null) {
throw new ElasticsearchParseException("[{}] command missing the index parameter", NAME);
}
if (shardId == -1) {
throw new ElasticsearchParseException("[{}] command missing the shard parameter", NAME);
}
if (nodeId == null) {
throw new ElasticsearchParseException("[{}] command missing the node parameter", NAME);
}
return new AllocateAllocationCommand(new ShardId(index, shardId), nodeId, allowPrimary);
}
@Override
public void toXContent(AllocateAllocationCommand command, XContentBuilder builder, ToXContent.Params params, String objectName) throws IOException {
if (objectName == null) {
builder.startObject();
} else {
builder.startObject(objectName);
}
builder.field("index", command.shardId().index().name());
builder.field("shard", command.shardId().id());
builder.field("node", command.node());
builder.field("allow_primary", command.allowPrimary());
builder.endObject();
}
}
private final ShardId shardId;
private final String node;
private final boolean allowPrimary;
/**
* Create a new {@link AllocateAllocationCommand}
*
* @param shardId {@link ShardId} of the shard to assign
* @param node Node to assign the shard to
* @param allowPrimary whether the node should be allowed to allocate the shard as primary
*/
public AllocateAllocationCommand(ShardId shardId, String node, boolean allowPrimary) {
this.shardId = shardId;
this.node = node;
this.allowPrimary = allowPrimary;
}
@Override
public String name() {
return NAME;
}
/**
* Get the shard id
*
* @return id of the shard
*/
public ShardId shardId() {
return this.shardId;
}
/**
* Get the id of the Node
*
* @return id of the Node
*/
public String node() {
return this.node;
}
/**
* Determine if primary allocation is allowed
*
* @return <code>true</code> if primary allocation is allowed. Otherwise <code>false</code>
*/
public boolean allowPrimary() {
return this.allowPrimary;
}
@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
final DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
final RoutingNodes routingNodes = allocation.routingNodes();
ShardRouting shardRouting = null;
for (ShardRouting routing : routingNodes.unassigned()) {
if (routing.shardId().equals(shardId)) {
// prefer primaries first to allocate
if (shardRouting == null || routing.primary()) {
shardRouting = routing;
}
}
}
if (shardRouting == null) {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command",
"failed to find " + shardId + " on the list of unassigned shards"));
}
throw new IllegalArgumentException("[allocate] failed to find " + shardId + " on the list of unassigned shards");
}
if (shardRouting.primary() && !allowPrimary) {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command",
"trying to allocate a primary shard " + shardId + ", which is disabled"));
}
throw new IllegalArgumentException("[allocate] trying to allocate a primary shard " + shardId + ", which is disabled");
}
RoutingNode routingNode = routingNodes.node(discoNode.id());
if (routingNode == null) {
if (!discoNode.dataNode()) {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command",
"Allocation can only be done on data nodes, not [" + node + "]"));
}
throw new IllegalArgumentException("Allocation can only be done on data nodes, not [" + node + "]");
} else {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command",
"Could not find [" + node + "] among the routing nodes"));
}
throw new IllegalStateException("Could not find [" + node + "] among the routing nodes");
}
}
Decision decision = allocation.deciders().canAllocate(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
if (explain) {
return new RerouteExplanation(this, decision);
}
throw new IllegalArgumentException("[allocate] allocation of " + shardId + " on node " + discoNode + " is not allowed, reason: " + decision);
}
// go over and remove it from the unassigned
for (RoutingNodes.UnassignedShards.UnassignedIterator it = routingNodes.unassigned().iterator(); it.hasNext(); ) {
ShardRouting unassigned = it.next();
if (unassigned != shardRouting) {
continue;
}
// if we force allocation of a primary, we need to move the unassigned info back to treat it as if
// it was index creation
if (unassigned.primary() && unassigned.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
unassigned.updateUnassignedInfo(new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
"force allocation from previous reason " + unassigned.unassignedInfo().getReason() + ", " + unassigned.unassignedInfo().getMessage(),
unassigned.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis()));
}
it.initialize(routingNode.nodeId(), unassigned.version(), allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
break;
}
return new RerouteExplanation(this, decision);
}
}

View File

@ -0,0 +1,125 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import java.io.IOException;
/**
* Allocates an unassigned empty primary shard to a specific node. Use with extreme care as this will result in data loss.
* Allocation deciders are ignored.
*/
public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocationCommand {
public static final String NAME = "allocate_empty_primary";
private static final ObjectParser<Builder, Void> EMPTY_PRIMARY_PARSER = BasePrimaryAllocationCommand.createAllocatePrimaryParser(NAME);
/**
* Creates a new {@link AllocateEmptyPrimaryAllocationCommand}
*
* @param shardId {@link ShardId} of the shard to assign
* @param node node id of the node to assign the shard to
* @param acceptDataLoss whether the user agrees to data loss
*/
public AllocateEmptyPrimaryAllocationCommand(ShardId shardId, String node, boolean acceptDataLoss) {
super(shardId, node, acceptDataLoss);
}
@Override
public String name() {
return NAME;
}
public static class Builder extends BasePrimaryAllocationCommand.Builder<AllocateEmptyPrimaryAllocationCommand> {
@Override
public Builder parse(XContentParser parser) throws IOException {
return EMPTY_PRIMARY_PARSER.parse(parser, this);
}
@Override
public AllocateEmptyPrimaryAllocationCommand build() {
validate();
return new AllocateEmptyPrimaryAllocationCommand(new ShardId(index, shard), node, acceptDataLoss);
}
}
public static class Factory extends AbstractAllocateAllocationCommand.Factory<AllocateEmptyPrimaryAllocationCommand> {
@Override
protected Builder newBuilder() {
return new Builder();
}
}
@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
final DiscoveryNode discoNode;
try {
discoNode = allocation.nodes().resolveNode(node);
} catch (IllegalArgumentException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
final RoutingNodes routingNodes = allocation.routingNodes();
RoutingNode routingNode = routingNodes.node(discoNode.id());
if (routingNode == null) {
return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
}
final ShardRouting shardRouting;
try {
shardRouting = allocation.routingTable().shardRoutingTable(shardId).primaryShard();
} catch (IndexNotFoundException | ShardNotFoundException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
if (shardRouting.unassigned() == false) {
return explainOrThrowRejectedCommand(explain, allocation, "primary " + shardId + " is already assigned");
}
if (shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && acceptDataLoss == false) {
return explainOrThrowRejectedCommand(explain, allocation,
"allocating an empty primary for " + shardId + " can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
}
initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting,
shr -> {
if (shr.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
// we need to move the unassigned info back to treat it as if it was index creation
shr.updateUnassignedInfo(new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
"force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
shardRouting.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis()));
}
});
return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
}
}
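For reference, the command can also be constructed directly, exactly as Builder.build() above does; the index and node names below are hypothetical, and accept_data_loss=true in the JSON body maps to the boolean checked in execute():

import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
import org.elasticsearch.index.shard.ShardId;

public class ForceAllocateExample {
    public static void main(String[] args) {
        // Force-allocates an empty primary for shard 0 of "my_index" to "node-1".
        AllocateEmptyPrimaryAllocationCommand command =
            new AllocateEmptyPrimaryAllocationCommand(new ShardId("my_index", 0), "node-1", true);
        System.out.println(command.name() + " -> " + command.shardId() + " on " + command.node());
    }
}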

View File

@ -0,0 +1,131 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import java.io.IOException;
import java.util.List;
/**
* Allocates an unassigned replica shard to a specific node. Checks if allocation deciders allow allocation.
*/
public class AllocateReplicaAllocationCommand extends AbstractAllocateAllocationCommand {
public static final String NAME = "allocate_replica";
private static final ObjectParser<AllocateReplicaAllocationCommand.Builder, Void> REPLICA_PARSER = createAllocateParser(NAME);
/**
* Creates a new {@link AllocateReplicaAllocationCommand}
*
* @param shardId {@link ShardId} of the shard to assign
* @param node node id of the node to assign the shard to
*/
public AllocateReplicaAllocationCommand(ShardId shardId, String node) {
super(shardId, node);
}
@Override
public String name() {
return NAME;
}
protected static class Builder extends AbstractAllocateAllocationCommand.Builder<AllocateReplicaAllocationCommand> {
@Override
public Builder parse(XContentParser parser) throws IOException {
return REPLICA_PARSER.parse(parser, this);
}
@Override
public AllocateReplicaAllocationCommand build() {
validate();
return new AllocateReplicaAllocationCommand(new ShardId(index, shard), node);
}
}
public static class Factory extends AbstractAllocateAllocationCommand.Factory<AllocateReplicaAllocationCommand> {
@Override
protected Builder newBuilder() {
return new Builder();
}
}
@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
final DiscoveryNode discoNode;
try {
discoNode = allocation.nodes().resolveNode(node);
} catch (IllegalArgumentException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
final RoutingNodes routingNodes = allocation.routingNodes();
RoutingNode routingNode = routingNodes.node(discoNode.id());
if (routingNode == null) {
return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
}
final ShardRouting primaryShardRouting;
try {
primaryShardRouting = allocation.routingTable().shardRoutingTable(shardId).primaryShard();
} catch (IndexNotFoundException | ShardNotFoundException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
if (primaryShardRouting.unassigned()) {
return explainOrThrowRejectedCommand(explain, allocation,
"trying to allocate a replica shard " + shardId + ", while corresponding primary shard is still unassigned");
}
List<ShardRouting> replicaShardRoutings = allocation.routingTable().shardRoutingTable(shardId).replicaShardsWithState(ShardRoutingState.UNASSIGNED);
ShardRouting shardRouting;
if (replicaShardRoutings.isEmpty()) {
return explainOrThrowRejectedCommand(explain, allocation,
"all copies of " + shardId +" are already assigned. Use the move allocation command instead");
} else {
shardRouting = replicaShardRoutings.get(0);
}
Decision decision = allocation.deciders().canAllocate(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
// don't use explainOrThrowRejectedCommand to keep the original "NO" decision
if (explain) {
return new RerouteExplanation(this, decision);
}
throw new IllegalArgumentException("[" + name() + "] allocation of " + shardId + " on node " + discoNode + " is not allowed, reason: " + decision);
}
initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting);
return new RerouteExplanation(this, decision);
}
}

View File

@ -0,0 +1,124 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import java.io.IOException;
/**
* Allocates an unassigned stale primary shard to a specific node. Use with extreme care as this will result in data loss.
* Allocation deciders are ignored.
*/
public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocationCommand {
public static final String NAME = "allocate_stale_primary";
private static final ObjectParser<Builder, Void> STALE_PRIMARY_PARSER = BasePrimaryAllocationCommand.createAllocatePrimaryParser(NAME);
/**
* Creates a new {@link AllocateStalePrimaryAllocationCommand}
*
* @param shardId {@link ShardId} of the shard to assign
* @param node node id of the node to assign the shard to
* @param acceptDataLoss whether the user agrees to data loss
*/
public AllocateStalePrimaryAllocationCommand(ShardId shardId, String node, boolean acceptDataLoss) {
super(shardId, node, acceptDataLoss);
}
@Override
public String name() {
return NAME;
}
public static class Builder extends BasePrimaryAllocationCommand.Builder<AllocateStalePrimaryAllocationCommand> {
@Override
public Builder parse(XContentParser parser) throws IOException {
return STALE_PRIMARY_PARSER.parse(parser, this);
}
@Override
public AllocateStalePrimaryAllocationCommand build() {
validate();
return new AllocateStalePrimaryAllocationCommand(new ShardId(index, shard), node, acceptDataLoss);
}
}
public static class Factory extends AbstractAllocateAllocationCommand.Factory<AllocateStalePrimaryAllocationCommand> {
@Override
protected Builder newBuilder() {
return new Builder();
}
}
@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
final DiscoveryNode discoNode;
try {
discoNode = allocation.nodes().resolveNode(node);
} catch (IllegalArgumentException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
final RoutingNodes routingNodes = allocation.routingNodes();
RoutingNode routingNode = routingNodes.node(discoNode.id());
if (routingNode == null) {
return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
}
final ShardRouting shardRouting;
try {
shardRouting = allocation.routingTable().shardRoutingTable(shardId).primaryShard();
} catch (IndexNotFoundException | ShardNotFoundException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
if (shardRouting.unassigned() == false) {
return explainOrThrowRejectedCommand(explain, allocation, "primary " + shardId + " is already assigned");
}
if (acceptDataLoss == false) {
return explainOrThrowRejectedCommand(explain, allocation,
"allocating an empty primary for " + shardId + " can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
}
final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
if (shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
return explainOrThrowRejectedCommand(explain, allocation,
"trying to allocate an existing primary shard " + shardId + ", while no such shard has ever been active");
}
initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting);
return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
}
}
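A matching sketch for the stale-primary variant; the values are again invented, and the command is only accepted when acceptDataLoss is explicitly true:

// Hypothetical sketch: force the stale copy of [test][0] on "node1" back to primary.
// With acceptDataLoss == false, execute(...) rejects the command instead.
AllocationCommands commands = new AllocationCommands(
        new AllocateStalePrimaryAllocationCommand(new ShardId("test", 0), "node1", true));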

View File

@@ -67,7 +67,9 @@ public class AllocationCommands {
}
static {
registerFactory(AllocateAllocationCommand.NAME, new AllocateAllocationCommand.Factory());
registerFactory(AllocateEmptyPrimaryAllocationCommand.NAME, new AllocateEmptyPrimaryAllocationCommand.Factory());
registerFactory(AllocateStalePrimaryAllocationCommand.NAME, new AllocateStalePrimaryAllocationCommand.Factory());
registerFactory(AllocateReplicaAllocationCommand.NAME, new AllocateReplicaAllocationCommand.Factory());
registerFactory(CancelAllocationCommand.NAME, new CancelAllocationCommand.Factory());
registerFactory(MoveAllocationCommand.NAME, new MoveAllocationCommand.Factory());
}
@@ -76,7 +78,7 @@ public class AllocationCommands {
/**
* Creates a new set of {@link AllocationCommands}
*
*
* @param commands {@link AllocationCommand}s that are wrapped by this instance
*/
public AllocationCommands(AllocationCommand... commands) {
@@ -122,7 +124,7 @@ public class AllocationCommands {
* Reads a {@link AllocationCommands} from a {@link StreamInput}
* @param in {@link StreamInput} to read from
* @return {@link AllocationCommands} read
*
*
* @throws IOException if something happens during read
*/
public static AllocationCommands readFrom(StreamInput in) throws IOException {
@@ -137,7 +139,7 @@ public class AllocationCommands {
/**
* Writes {@link AllocationCommands} to a {@link StreamOutput}
*
*
* @param commands Commands to write
* @param out {@link StreamOutput} to write the commands to
* @throws IOException if something happens during write
@@ -149,7 +151,7 @@ public class AllocationCommands {
lookupFactorySafe(command.name()).writeTo(command, out);
}
}
/**
* Reads {@link AllocationCommands} from a {@link XContentParser}
* <pre>
@@ -161,7 +163,7 @@ public class AllocationCommands {
* </pre>
* @param parser {@link XContentParser} to read the commands from
* @return {@link AllocationCommands} read
* @throws IOException if something bad happens while reading the stream
* @throws IOException if something bad happens while reading the stream
*/
public static AllocationCommands fromXContent(XContentParser parser) throws IOException {
AllocationCommands commands = new AllocationCommands();
@@ -203,10 +205,10 @@ public class AllocationCommands {
}
return commands;
}
/**
* Writes {@link AllocationCommands} to a {@link XContentBuilder}
*
*
* @param commands {@link AllocationCommands} to write
* @param builder {@link XContentBuilder} to use
* @param params Parameters to use for building

View File

@@ -0,0 +1,88 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
/**
* Abstract base class for allocating an unassigned primary shard to a node
*/
public abstract class BasePrimaryAllocationCommand extends AbstractAllocateAllocationCommand {
private static final String ACCEPT_DATA_LOSS_KEY = "accept_data_loss";
protected static <T extends Builder> ObjectParser<T, Void> createAllocatePrimaryParser(String command) {
ObjectParser<T, Void> parser = AbstractAllocateAllocationCommand.createAllocateParser(command);
parser.declareBoolean(Builder::setAcceptDataLoss, new ParseField(ACCEPT_DATA_LOSS_KEY));
return parser;
}
protected final boolean acceptDataLoss;
protected BasePrimaryAllocationCommand(ShardId shardId, String node, boolean acceptDataLoss) {
super(shardId, node);
this.acceptDataLoss = acceptDataLoss;
}
/**
* The operation only executes if the user explicitly agrees to possible data loss
*
* @return whether data loss is acceptable
*/
public boolean acceptDataLoss() {
return acceptDataLoss;
}
protected static abstract class Builder<T extends BasePrimaryAllocationCommand> extends AbstractAllocateAllocationCommand.Builder<T> {
protected boolean acceptDataLoss;
public void setAcceptDataLoss(boolean acceptDataLoss) {
this.acceptDataLoss = acceptDataLoss;
}
@Override
public Builder readFrom(StreamInput in) throws IOException {
super.readFrom(in);
acceptDataLoss = in.readBoolean();
return this;
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
super.toXContent(builder, params);
builder.field(ACCEPT_DATA_LOSS_KEY, acceptDataLoss);
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(acceptDataLoss);
}
}
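For reference, a sketch (as Java comments, with invented values) of the XContent form the combined parser accepts: index, shard and node are declared by the shared base allocate parser, accept_data_loss by the declaration above:

// { "allocate_stale_primary" : {
//     "index" : "test", "shard" : 0,
//     "node" : "node1", "accept_data_loss" : true
// } }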

View File

@@ -125,7 +125,7 @@ public class CancelAllocationCommand implements AllocationCommand {
/**
* Creates a new {@link CancelAllocationCommand}
*
*
* @param shardId id of the shard which allocation should be canceled
* @param node id of the node that manages the shard which allocation should be canceled
*/

View File

@@ -32,7 +32,7 @@ import java.util.Locale;
/**
* This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} /
* {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}.
* {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link #INDEX_ROUTING_REBALANCE_ENABLE_SETTING}.
* The per index settings overrides the cluster wide setting.
*
* <p>
@@ -61,10 +61,10 @@ public class EnableAllocationDecider extends AllocationDecider {
public static final String NAME = "enable";
public static final Setting<Allocation> CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER);
public static final String INDEX_ROUTING_ALLOCATION_ENABLE= "index.routing.allocation.enable";
public static final Setting<Allocation> INDEX_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.INDEX);
public static final Setting<Rebalance> CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER);
public static final String INDEX_ROUTING_REBALANCE_ENABLE = "index.routing.rebalance.enable";
public static final Setting<Rebalance> INDEX_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.INDEX);
private volatile Rebalance enableRebalance;
private volatile Allocation enableAllocation;
@@ -92,11 +92,10 @@ public class EnableAllocationDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored");
}
IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
String enableIndexValue = indexMetaData.getSettings().get(INDEX_ROUTING_ALLOCATION_ENABLE);
final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
final Allocation enable;
if (enableIndexValue != null) {
enable = Allocation.parse(enableIndexValue);
if (INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.exists(indexMetaData.getSettings())) {
enable = INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.get(indexMetaData.getSettings());
} else {
enable = this.enableAllocation;
}
@@ -129,10 +128,9 @@ public class EnableAllocationDecider extends AllocationDecider {
}
Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings();
String enableIndexValue = indexSettings.get(INDEX_ROUTING_REBALANCE_ENABLE);
final Rebalance enable;
if (enableIndexValue != null) {
enable = Rebalance.parse(enableIndexValue);
if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) {
enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings);
} else {
enable = this.enableRebalance;
}
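Both lookups above follow the new exists()/get() idiom that replaces raw string access to index settings; a minimal sketch, with an invented per-index override:

// Hypothetical sketch of the exists()/get() override pattern used above.
Settings indexSettings = Settings.builder()
        .put("index.routing.rebalance.enable", "none") // example per-index override
        .build();
final Rebalance enable;
if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) {
    enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings); // Rebalance.NONE
} else {
    enable = Rebalance.ALL; // in the decider: this.enableRebalance, the cluster-wide value
}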
@@ -160,7 +158,7 @@ public class EnableAllocationDecider extends AllocationDecider {
/**
* Allocation values or rather their string representation to be used with
* {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE}
* {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE_SETTING}
* via cluster / index settings.
*/
public enum Allocation {
@@ -186,7 +184,7 @@ public class EnableAllocationDecider extends AllocationDecider {
/**
* Rebalance values or rather their string representation to be used with
* {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE}
* {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE_SETTING}
* via cluster / index settings.
*/
public enum Rebalance {

View File

@@ -60,10 +60,6 @@ public class FilterAllocationDecider extends AllocationDecider {
public static final String NAME = "filter";
public static final String INDEX_ROUTING_REQUIRE_GROUP = "index.routing.allocation.require.";
public static final String INDEX_ROUTING_INCLUDE_GROUP = "index.routing.allocation.include.";
public static final String INDEX_ROUTING_EXCLUDE_GROUP = "index.routing.allocation.exclude.";
public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER);
public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER);
public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER);

View File

@@ -32,12 +32,12 @@ import org.elasticsearch.common.settings.Settings;
/**
* This {@link AllocationDecider} limits the number of shards per node on a per
* index or node-wide basis. The allocator prevents a single node to hold more
* than {@value #INDEX_TOTAL_SHARDS_PER_NODE} per index and
* than <tt>index.routing.allocation.total_shards_per_node</tt> per index and
* <tt>cluster.routing.allocation.total_shards_per_node</tt> globally during the allocation
* process. The limits of this decider can be changed in real-time via the
* index settings API.
* <p>
* If {@value #INDEX_TOTAL_SHARDS_PER_NODE} is reset to a negative value shards
* If <tt>index.routing.allocation.total_shards_per_node</tt> is reset to a negative value shards
* per index are unlimited per node. Shards currently in the
* {@link ShardRoutingState#RELOCATING relocating} state are ignored by this
* {@link AllocationDecider} until the shard changed its state to either
@@ -59,12 +59,13 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
* Controls the maximum number of shards per index on a single Elasticsearch
* node. Negative values are interpreted as unlimited.
*/
public static final String INDEX_TOTAL_SHARDS_PER_NODE = "index.routing.allocation.total_shards_per_node";
public static final Setting<Integer> INDEX_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.INDEX);
/**
* Controls the maximum number of shards per node on a global level.
* Negative values are interpreted as unlimited.
*/
public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, true, Setting.Scope.CLUSTER);
public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.CLUSTER);
@Inject
@@ -81,7 +82,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
int indexShardLimit = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
// Capture the limit here in case it changes during this method's
// execution
final int clusterShardLimit = this.clusterShardLimit;
@@ -118,7 +119,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
@Override
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
int indexShardLimit = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
// Capture the limit here in case it changes during this method's
// execution
final int clusterShardLimit = this.clusterShardLimit;
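The two-argument get(indexMd.getSettings(), settings) above reads the per-index value and falls back to the node-level settings; a short sketch with invented values:

// Hypothetical sketch: the per-index value wins over the node-level fallback.
Settings nodeSettings = Settings.builder()
        .put("index.routing.allocation.total_shards_per_node", 5).build();
Settings indexSettings = Settings.builder()
        .put("index.routing.allocation.total_shards_per_node", 2).build();
int limit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexSettings, nodeSettings);
// limit == 2; it would be 5 if the index had no value, and -1 (unlimited) if neither did.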

View File

@@ -630,7 +630,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
}
executor.clusterStatePublished(newClusterState);
try {
executor.clusterStatePublished(newClusterState);
} catch (Exception e) {
logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, source);
}
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());

View File

@@ -1,74 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.settings;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.regex.Regex;
/**
* A container for setting names and validation methods for those settings.
*/
public class DynamicSettings {
private final ImmutableOpenMap<String, Validator> dynamicSettings;
public static class Builder {
private ImmutableOpenMap.Builder<String, Validator> settings = ImmutableOpenMap.builder();
public void addSetting(String setting, Validator validator) {
Validator old = settings.put(setting, validator);
if (old != null) {
throw new IllegalArgumentException("Cannot register setting [" + setting + "] twice");
}
}
public DynamicSettings build() {
return new DynamicSettings(settings.build());
}
}
private DynamicSettings(ImmutableOpenMap<String, Validator> settings) {
this.dynamicSettings = settings;
}
public boolean isDynamicOrLoggingSetting(String key) {
return hasDynamicSetting(key) || key.startsWith("logger.");
}
public boolean hasDynamicSetting(String key) {
for (ObjectCursor<String> dynamicSetting : dynamicSettings.keys()) {
if (Regex.simpleMatch(dynamicSetting.value, key)) {
return true;
}
}
return false;
}
public String validateDynamicSetting(String dynamicSetting, String value, ClusterState clusterState) {
for (ObjectObjectCursor<String, Validator> setting : dynamicSettings) {
if (Regex.simpleMatch(setting.key, dynamicSetting)) {
return setting.value.validate(dynamicSetting, value, clusterState);
}
}
return null;
}
}

View File

@@ -1,307 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.settings;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue;
import static org.elasticsearch.common.unit.MemorySizeValue.parseBytesSizeValueOrHeapRatio;
/**
* Validates a setting, returning a failure message if applicable.
*/
public interface Validator {
String validate(String setting, String value, ClusterState clusterState);
Validator EMPTY = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
return null;
}
};
Validator TIME = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
if (value == null) {
throw new NullPointerException("value must not be null");
}
try {
// This never returns null:
TimeValue.parseTimeValue(value, null, setting);
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};
Validator TIMEOUT = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (value == null) {
throw new NullPointerException("value must not be null");
}
TimeValue timeValue = TimeValue.parseTimeValue(value, null, setting);
assert timeValue != null;
if (timeValue.millis() < 0 && timeValue.millis() != -1) {
return "cannot parse value [" + value + "] as a timeout";
}
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};
Validator TIME_NON_NEGATIVE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (value == null) {
throw new NullPointerException("value must not be null");
}
TimeValue timeValue = TimeValue.parseTimeValue(value, null, setting);
assert timeValue != null;
if (timeValue.millis() < 0) {
return "cannot parse value [" + value + "] as non negative time";
}
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};
Validator FLOAT = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
Float.parseFloat(value);
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a float";
}
return null;
}
};
Validator NON_NEGATIVE_FLOAT = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Float.parseFloat(value) < 0.0) {
return "the value of the setting " + setting + " must be a non negative float";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
};
Validator DOUBLE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
Double.parseDouble(value);
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
};
Validator NON_NEGATIVE_DOUBLE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Double.parseDouble(value) < 0.0) {
return "the value of the setting " + setting + " must be a non negative double";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
};
Validator DOUBLE_GTE_2 = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Double.parseDouble(value) < 2.0) {
return "the value of the setting " + setting + " must be >= 2.0";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
};
Validator INTEGER = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
Integer.parseInt(value);
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};
Validator POSITIVE_INTEGER = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Integer.parseInt(value) <= 0) {
return "the value of the setting " + setting + " must be a positive integer";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};
Validator NON_NEGATIVE_INTEGER = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Integer.parseInt(value) < 0) {
return "the value of the setting " + setting + " must be a non negative integer";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};
Validator INTEGER_GTE_2 = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Integer.parseInt(value) < 2) {
return "the value of the setting " + setting + " must be >= 2";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};
Validator BYTES_SIZE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
parseBytesSizeValue(value, setting);
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};
Validator POSITIVE_BYTES_SIZE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState state) {
try {
ByteSizeValue byteSizeValue = parseBytesSizeValue(value, setting);
if (byteSizeValue.getBytes() <= 0) {
return setting + " must be a positive byte size value";
}
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};
Validator PERCENTAGE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (value == null) {
return "the value of " + setting + " can not be null";
}
if (!value.endsWith("%")) {
return "the value [" + value + "] for " + setting + " must end with %";
}
final double asDouble = Double.parseDouble(value.substring(0, value.length() - 1));
if (asDouble < 0.0 || asDouble > 100.0) {
return "the value [" + value + "] for " + setting + " must be a percentage between 0% and 100%";
}
} catch (NumberFormatException ex) {
return ex.getMessage();
}
return null;
}
};
Validator BYTES_SIZE_OR_PERCENTAGE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
String byteSize = BYTES_SIZE.validate(setting, value, clusterState);
if (byteSize != null) {
String percentage = PERCENTAGE.validate(setting, value, clusterState);
if (percentage == null) {
return null;
}
return percentage + " or be a valid bytes size value, like [16mb]";
}
return null;
}
};
Validator MEMORY_SIZE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
parseBytesSizeValueOrHeapRatio(value, setting);
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};
public static final Validator BOOLEAN = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
if (value != null && (Booleans.isExplicitFalse(value) || Booleans.isExplicitTrue(value))) {
return null;
}
return "cannot parse value [" + value + "] as a boolean";
}
};
}

View File

@@ -19,6 +19,7 @@
package org.elasticsearch.common;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import java.lang.reflect.Method;
@@ -40,7 +41,7 @@ import java.util.concurrent.ThreadLocalRandom;
* setting a reproducible seed. When running the Elasticsearch server
* process, non-reproducible sources of randomness are provided (unless
* a setting is provided for a module that exposes a seed setting (e.g.,
* DiscoveryService#SETTING_DISCOVERY_SEED)).
* DiscoveryService#DISCOVERY_SEED_SETTING)).
*/
public final class Randomness {
private static final Method currentMethod;
@@ -68,13 +69,12 @@ public final class Randomness {
* seed in the settings with the key setting.
*
* @param settings the settings containing the seed
* @param setting the key to access the seed
* @param setting the setting to access the seed
* @return a reproducible source of randomness
*/
public static Random get(Settings settings, String setting) {
Long maybeSeed = settings.getAsLong(setting, null);
if (maybeSeed != null) {
return new Random(maybeSeed);
public static Random get(Settings settings, Setting<Long> setting) {
if (setting.exists(settings)) {
return new Random(setting.get(settings));
} else {
return get();
}
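A sketch of the new call shape; the seed setting below is invented for illustration and mirrors the Setting constructor pattern used elsewhere in this commit:

// Hypothetical sketch: a reproducible Random only when a seed setting is present.
Setting<Long> seedSetting =
        new Setting<>("my.module.seed", "0", Long::parseLong, false, Setting.Scope.CLUSTER);
Settings settings = Settings.builder().put("my.module.seed", 42L).build();
Random reproducible = Randomness.get(settings, seedSetting);          // seeded with 42
Random nonReproducible = Randomness.get(Settings.EMPTY, seedSetting); // falls back to get()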

View File

@@ -36,7 +36,7 @@ public class CircleBuilder extends ShapeBuilder {
public static final String FIELD_RADIUS = "radius";
public static final GeoShapeType TYPE = GeoShapeType.CIRCLE;
static final CircleBuilder PROTOTYPE = new CircleBuilder();
public static final CircleBuilder PROTOTYPE = new CircleBuilder();
private DistanceUnit unit = DistanceUnit.DEFAULT;
private double radius;

View File

@@ -33,7 +33,7 @@ public class EnvelopeBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE;
static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(new Coordinate(-1.0, 1.0), new Coordinate(1.0, -1.0));
public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(new Coordinate(-1.0, 1.0), new Coordinate(1.0, -1.0));
private Coordinate topLeft;
private Coordinate bottomRight;

View File

@@ -36,7 +36,7 @@ public class GeometryCollectionBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION;
static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder();
public static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder();
protected final ArrayList<ShapeBuilder> shapes = new ArrayList<>();

View File

@@ -57,7 +57,7 @@ public class LineStringBuilder extends CoordinateCollection<LineStringBuilder> {
public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
static final LineStringBuilder PROTOTYPE = new LineStringBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(1.0, 1.0));
public static final LineStringBuilder PROTOTYPE = new LineStringBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(1.0, 1.0));
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {

View File

@@ -37,7 +37,7 @@ public class MultiLineStringBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING;
static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder();
public static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder();
private final ArrayList<LineStringBuilder> lines = new ArrayList<>();

View File

@@ -37,7 +37,7 @@ public class MultiPointBuilder extends CoordinateCollection<MultiPointBuilder> {
public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT;
final static MultiPointBuilder PROTOTYPE = new MultiPointBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).build());
public static final MultiPointBuilder PROTOTYPE = new MultiPointBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).build());
/**
* Create a new {@link MultiPointBuilder}.

View File

@@ -36,7 +36,7 @@ import java.util.Objects;
public class MultiPolygonBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON;
static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder();
public static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder();
private final ArrayList<PolygonBuilder> polygons = new ArrayList<>();

View File

@@ -32,7 +32,7 @@ import java.util.Objects;
public class PointBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.POINT;
static final PointBuilder PROTOTYPE = new PointBuilder();
public static final PointBuilder PROTOTYPE = new PointBuilder();
private Coordinate coordinate;

View File

@@ -53,7 +53,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
public class PolygonBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.POLYGON;
static final PolygonBuilder PROTOTYPE = new PolygonBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(0.0, 1.0)
public static final PolygonBuilder PROTOTYPE = new PolygonBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(0.0, 1.0)
.coordinate(1.0, 0.0).coordinate(0.0, 0.0));
private static final Coordinate[][] EMPTY = new Coordinate[0][];

View File

@@ -1,45 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
/**
* Register the shape builder prototypes with the {@link NamedWriteableRegistry}
*/
public class ShapeBuilderRegistry {
@Inject
public ShapeBuilderRegistry(NamedWriteableRegistry namedWriteableRegistry) {
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE);
}
}
}

View File

@@ -49,8 +49,8 @@ public abstract class AbstractMatcher<T> implements Matcher<T> {
@Override
public boolean equals(Object other) {
return other instanceof AndMatcher
&& ((AndMatcher) other).a.equals(a)
&& ((AndMatcher) other).b.equals(b);
&& ((AndMatcher<?>) other).a.equals(a)
&& ((AndMatcher<?>) other).b.equals(b);
}
@Override
@@ -80,8 +80,8 @@ public abstract class AbstractMatcher<T> implements Matcher<T> {
@Override
public boolean equals(Object other) {
return other instanceof OrMatcher
&& ((OrMatcher) other).a.equals(a)
&& ((OrMatcher) other).b.equals(b);
&& ((OrMatcher<?>) other).a.equals(a)
&& ((OrMatcher<?>) other).b.equals(b);
}
@Override

View File

@@ -20,9 +20,11 @@
package org.elasticsearch.common.lucene;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReader.CoreClosedListener;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -72,7 +74,7 @@ public final class ShardCoreKeyMap {
}
final boolean added = objects.add(coreKey);
assert added;
reader.addCoreClosedListener(ownerCoreCacheKey -> {
CoreClosedListener listener = ownerCoreCacheKey -> {
assert coreKey == ownerCoreCacheKey;
synchronized (ShardCoreKeyMap.this) {
coreKeyToShard.remove(ownerCoreCacheKey);
@@ -83,7 +85,20 @@ public final class ShardCoreKeyMap {
indexToCoreKey.remove(index);
}
}
});
};
boolean addedListener = false;
try {
reader.addCoreClosedListener(listener);
addedListener = true;
} finally {
if (false == addedListener) {
try {
listener.onClose(coreKey);
} catch (IOException e) {
throw new RuntimeException("Blow up trying to recover from failure to add listener", e);
}
}
}
}
}
}

View File

@@ -19,13 +19,18 @@
package org.elasticsearch.common.network;
import java.util.Arrays;
import java.util.List;
import org.elasticsearch.client.support.Headers;
import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.client.transport.support.TransportProxyClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerTransport;
@@ -135,9 +140,6 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import org.elasticsearch.transport.netty.NettyTransport;
import java.util.Arrays;
import java.util.List;
/**
* A module to handle registering and binding all network related classes.
*/
@@ -150,7 +152,7 @@ public class NetworkModule extends AbstractModule {
public static final String NETTY_TRANSPORT = "netty";
public static final String HTTP_TYPE_KEY = "http.type";
public static final String HTTP_ENABLED = "http.enabled";
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, false, Scope.CLUSTER);
private static final List<Class<? extends RestHandler>> builtinRestHandlers = Arrays.asList(
RestMainAction.class,
@@ -291,6 +293,7 @@ public class NetworkModule extends AbstractModule {
private final ExtensionPoint.ClassSet<RestHandler> restHandlers = new ExtensionPoint.ClassSet<>("rest_handler", RestHandler.class);
// we must separate the cat rest handlers so RestCatAction can collect them...
private final ExtensionPoint.ClassSet<AbstractCatAction> catHandlers = new ExtensionPoint.ClassSet<>("cat_handler", AbstractCatAction.class);
private final NamedWriteableRegistry namedWriteableRegistry;
/**
* Creates a network module that custom networking classes can be plugged into.
@@ -298,11 +301,13 @@ public class NetworkModule extends AbstractModule {
* @param networkService A constructed network service object to bind.
* @param settings The settings for the node
* @param transportClient True if only transport classes should be allowed to be registered, false otherwise.
* @param namedWriteableRegistry registry for named writeables for use during streaming
*/
public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient) {
public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient, NamedWriteableRegistry namedWriteableRegistry) {
this.networkService = networkService;
this.settings = settings;
this.transportClient = transportClient;
this.namedWriteableRegistry = namedWriteableRegistry;
registerTransportService(NETTY_TRANSPORT, TransportService.class);
registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
registerTransport(NETTY_TRANSPORT, NettyTransport.class);
@@ -354,7 +359,7 @@ public class NetworkModule extends AbstractModule {
@Override
protected void configure() {
bind(NetworkService.class).toInstance(networkService);
bind(NamedWriteableRegistry.class).asEagerSingleton();
bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, NETTY_TRANSPORT);
String defaultTransport = DiscoveryNode.localNode(settings) ? LOCAL_TRANSPORT : NETTY_TRANSPORT;
@@ -365,7 +370,7 @@ public class NetworkModule extends AbstractModule {
bind(TransportProxyClient.class).asEagerSingleton();
bind(TransportClientNodesService.class).asEagerSingleton();
} else {
if (settings.getAsBoolean(HTTP_ENABLED, true)) {
if (HTTP_ENABLED.get(settings)) {
bind(HttpServer.class).asEagerSingleton();
httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_KEY, NETTY_TRANSPORT);
}
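The typed setting replaces the old settings.getAsBoolean(HTTP_ENABLED, true) lookup; a minimal sketch, with an invented settings value:

// Hypothetical sketch: reading the typed http.enabled setting.
Settings settings = Settings.builder().put("http.enabled", false).build();
boolean httpEnabled = NetworkModule.HTTP_ENABLED.get(settings); // false; defaults to true when absent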

View File

@@ -19,7 +19,9 @@
package org.elasticsearch.common.network;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
@@ -41,31 +43,30 @@ public class NetworkService extends AbstractComponent {
/** By default, we bind to loopback interfaces */
public static final String DEFAULT_NETWORK_HOST = "_local_";
private static final String GLOBAL_NETWORK_HOST_SETTING = "network.host";
private static final String GLOBAL_NETWORK_BINDHOST_SETTING = "network.bind_host";
private static final String GLOBAL_NETWORK_PUBLISHHOST_SETTING = "network.publish_host";
public static final Setting<List<String>> GLOBAL_NETWORK_HOST_SETTING = Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST),
s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> GLOBAL_NETWORK_BINDHOST_SETTING = Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING,
s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> GLOBAL_NETWORK_PUBLISHHOST_SETTING = Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING,
s -> s, false, Setting.Scope.CLUSTER);
public static final class TcpSettings {
public static final String TCP_NO_DELAY = "network.tcp.no_delay";
public static final String TCP_KEEP_ALIVE = "network.tcp.keep_alive";
public static final String TCP_REUSE_ADDRESS = "network.tcp.reuse_address";
public static final String TCP_SEND_BUFFER_SIZE = "network.tcp.send_buffer_size";
public static final String TCP_RECEIVE_BUFFER_SIZE = "network.tcp.receive_buffer_size";
public static final String TCP_BLOCKING = "network.tcp.blocking";
public static final String TCP_BLOCKING_SERVER = "network.tcp.blocking_server";
public static final String TCP_BLOCKING_CLIENT = "network.tcp.blocking_client";
public static final String TCP_CONNECT_TIMEOUT = "network.tcp.connect_timeout";
public static final ByteSizeValue TCP_DEFAULT_SEND_BUFFER_SIZE = null;
public static final ByteSizeValue TCP_DEFAULT_RECEIVE_BUFFER_SIZE = null;
public static final TimeValue TCP_DEFAULT_CONNECT_TIMEOUT = new TimeValue(30, TimeUnit.SECONDS);
public static final Setting<Boolean> TCP_NO_DELAY = Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_KEEP_ALIVE = Setting.boolSetting("network.tcp.keep_alive", true, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_REUSE_ADDRESS = Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING = Setting.boolSetting("network.tcp.blocking", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_SERVER = Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER);
}
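Several of the new TcpSettings declare another setting as their default, e.g. TCP_BLOCKING_SERVER and TCP_BLOCKING_CLIENT falling back to TCP_BLOCKING; a sketch with an invented value:

// Hypothetical sketch: fallback-setting resolution.
Settings settings = Settings.builder().put("network.tcp.blocking", true).build();
boolean blockingServer = TcpSettings.TCP_BLOCKING_SERVER.get(settings); // true, via the fallback
boolean blockingClient = TcpSettings.TCP_BLOCKING_CLIENT.get(settings); // true as well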
/**
* A custom name resolver can support custom lookup keys (my_net_key:ipv4) and also change
* the default inet address used in case no setting is provided.
*/
public static interface CustomNameResolver {
public interface CustomNameResolver {
/**
* Resolves the default value if possible. If not, return <tt>null</tt>.
*/
@@ -94,6 +95,7 @@ public class NetworkService extends AbstractComponent {
/**
* Resolves {@code bindHosts} to a list of internet addresses. The list will
* not contain duplicate addresses.
*
* @param bindHosts list of hosts to bind to. this may contain special pseudo-hostnames
* such as _local_ (see the documentation). if it is null, it will be populated
* based on global default settings.
@@ -102,21 +104,22 @@ public class NetworkService extends AbstractComponent {
public InetAddress[] resolveBindHostAddresses(String bindHosts[]) throws IOException {
// first check settings
if (bindHosts == null) {
bindHosts = settings.getAsArray(GLOBAL_NETWORK_BINDHOST_SETTING, settings.getAsArray(GLOBAL_NETWORK_HOST_SETTING, null));
}
// next check any registered custom resolvers
if (bindHosts == null) {
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses;
if (GLOBAL_NETWORK_BINDHOST_SETTING.exists(settings) || GLOBAL_NETWORK_HOST_SETTING.exists(settings)) {
// if we have settings use them (we have a fallback to GLOBAL_NETWORK_HOST_SETTING inline
bindHosts = GLOBAL_NETWORK_BINDHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
} else {
// next check any registered custom resolvers
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses;
}
}
// we know it's not here. get the defaults
bindHosts = GLOBAL_NETWORK_BINDHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
}
}
// finally, fill with our default
if (bindHosts == null) {
bindHosts = new String[] { DEFAULT_NETWORK_HOST };
}
InetAddress addresses[] = resolveInetAddresses(bindHosts);
// try to deal with some (mis)configuration
@@ -138,6 +141,7 @@ public class NetworkService extends AbstractComponent {
* only one address is just a current limitation.
* <p>
* If {@code publishHosts} resolves to more than one address, <b>then one is selected with magic</b>
*
* @param publishHosts list of hosts to publish as. this may contain special pseudo-hostnames
* such as _local_ (see the documentation). if it is null, it will be populated
* based on global default settings.
@@ -145,23 +149,23 @@ public class NetworkService extends AbstractComponent {
*/
// TODO: needs to be InetAddress[]
public InetAddress resolvePublishHostAddresses(String publishHosts[]) throws IOException {
// first check settings
if (publishHosts == null) {
publishHosts = settings.getAsArray(GLOBAL_NETWORK_PUBLISHHOST_SETTING, settings.getAsArray(GLOBAL_NETWORK_HOST_SETTING, null));
}
// next check any registered custom resolvers
if (publishHosts == null) {
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses[0];
if (GLOBAL_NETWORK_PUBLISHHOST_SETTING.exists(settings) || GLOBAL_NETWORK_HOST_SETTING.exists(settings)) {
// if we have settings use them (we have a fallback to GLOBAL_NETWORK_HOST_SETTING inline
publishHosts = GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
} else {
// next check any registered custom resolvers
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses[0];
}
}
// we know it's not here. get the defaults
publishHosts = GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
}
}
// finally, fill with our default
if (publishHosts == null) {
publishHosts = new String[] { DEFAULT_NETWORK_HOST };
}
InetAddress addresses[] = resolveInetAddresses(publishHosts);
// TODO: allow publishing multiple addresses
// for now... the hack begins
@@ -184,17 +188,17 @@ public class NetworkService extends AbstractComponent {
throw new IllegalArgumentException("publish address: {" + NetworkAddress.format(address) + "} is wildcard, but multiple addresses specified: this makes no sense");
}
}
// 3. if we end out with multiple publish addresses, select by preference.
// don't warn the user, or they will get confused by bind_host vs publish_host etc.
if (addresses.length > 1) {
List<InetAddress> sorted = new ArrayList<>(Arrays.asList(addresses));
NetworkUtils.sortAddresses(sorted);
addresses = new InetAddress[] { sorted.get(0) };
addresses = new InetAddress[]{sorted.get(0)};
}
return addresses[0];
}
/** resolves (and deduplicates) host specification */
private InetAddress[] resolveInetAddresses(String hosts[]) throws IOException {
if (hosts.length == 0) {

View File

@@ -21,14 +21,20 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.util.set.Sets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.regex.Pattern;
/**
* A basic setting service that can be used for per-index and per-cluster settings.
@@ -36,24 +42,54 @@ import java.util.function.Consumer;
*/
public abstract class AbstractScopedSettings extends AbstractComponent {
private Settings lastSettingsApplied = Settings.EMPTY;
private final List<SettingUpdater> settingUpdaters = new ArrayList<>();
private final Map<String, Setting<?>> complexMatchers = new HashMap<>();
private final Map<String, Setting<?>> keySettings = new HashMap<>();
private final List<SettingUpdater<?>> settingUpdaters = new CopyOnWriteArrayList<>();
private final Map<String, Setting<?>> complexMatchers;
private final Map<String, Setting<?>> keySettings;
private final Setting.Scope scope;
private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$");
private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$");
protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet, Setting.Scope scope) {
super(settings);
for (Setting<?> entry : settingsSet) {
if (entry.getScope() != scope) {
throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope());
this.lastSettingsApplied = Settings.EMPTY;
this.scope = scope;
Map<String, Setting<?>> complexMatchers = new HashMap<>();
Map<String, Setting<?>> keySettings = new HashMap<>();
for (Setting<?> setting : settingsSet) {
if (setting.getScope() != scope) {
throw new IllegalArgumentException("Setting must be a " + scope + " setting but was: " + setting.getScope());
}
if (entry.hasComplexMatcher()) {
complexMatchers.put(entry.getKey(), entry);
if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())) == false) {
throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]");
}
if (setting.hasComplexMatcher()) {
complexMatchers.putIfAbsent(setting.getKey(), setting);
} else {
keySettings.put(entry.getKey(), entry);
keySettings.putIfAbsent(setting.getKey(), setting);
}
}
this.scope = scope;
this.complexMatchers = Collections.unmodifiableMap(complexMatchers);
this.keySettings = Collections.unmodifiableMap(keySettings);
}
protected AbstractScopedSettings(Settings nodeSettings, Settings scopeSettings, AbstractScopedSettings other) {
super(nodeSettings);
this.lastSettingsApplied = scopeSettings;
this.scope = other.scope;
complexMatchers = other.complexMatchers;
keySettings = other.keySettings;
settingUpdaters.addAll(other.settingUpdaters);
}
/**
* Returns <code>true</code> iff the given key is a valid settings key otherwise <code>false</code>
*/
public static boolean isValidKey(String key) {
return KEY_PATTERN.matcher(key).matches();
}
private static boolean isValidGroupKey(String key) {
return GROUP_KEY_PATTERN.matcher(key).matches();
}
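The two patterns above admit dot-separated word segments, with a trailing dot reserved for group settings; a few illustrative checks:

// Hypothetical sketch: keys accepted and rejected by KEY_PATTERN.
assert AbstractScopedSettings.isValidKey("cluster.routing.allocation.enable"); // valid
assert AbstractScopedSettings.isValidKey("cluster..routing") == false;         // empty segment
assert AbstractScopedSettings.isValidKey("cluster.routing.") == false;         // trailing dot is only valid as a group key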
public Setting.Scope getScope() {
@@ -68,7 +104,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
final Settings current = Settings.builder().put(this.settings).put(settings).build();
final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
List<RuntimeException> exceptions = new ArrayList<>();
for (SettingUpdater settingUpdater : settingUpdaters) {
for (SettingUpdater<?> settingUpdater : settingUpdaters) {
try {
if (settingUpdater.hasChanged(current, previous)) {
settingUpdater.getValue(current, previous);
@@ -99,7 +135,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
try {
List<Runnable> applyRunnables = new ArrayList<>();
for (SettingUpdater settingUpdater : settingUpdaters) {
for (SettingUpdater<?> settingUpdater : settingUpdaters) {
try {
applyRunnables.add(settingUpdater.updater(current, previous));
} catch (Exception ex) {
@@ -161,9 +197,38 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
addSettingsUpdateConsumer(setting, consumer, (s) -> {});
}
/**
* Validates that all settings in the builder are registered and valid
*/
public final void validate(Settings.Builder settingsBuilder) {
validate(settingsBuilder.build());
}
/**
* Validates that all given settings are registered and valid
*/
public final void validate(Settings settings) {
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
validate(entry.getKey(), settings);
}
}
/**
* Validates that the setting is valid
*/
public final void validate(String key, Settings settings) {
Setting setting = get(key);
if (setting == null) {
throw new IllegalArgumentException("unknown setting [" + key + "]");
}
setting.get(settings);
}
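A sketch of these validate methods at a call site; it assumes the concrete ClusterSettings subclass with the matching (Settings, Set&lt;Setting&lt;?&gt;&gt;) constructor, and the misspelled key is deliberate:

// Hypothetical sketch: every key must resolve to a registered setting and parse cleanly.
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY,
        Collections.singleton(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING));
clusterSettings.validate(Settings.builder()
        .put("cluster.routing.allocation.enable", "none")); // ok: registered, parses via Allocation::parse
clusterSettings.validate(Settings.builder()
        .put("cluster.routing.allocaton.enable", "none"));  // throws: unknown setting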
/**
* Transactional interface to update settings.
* @see Setting
* @param <T> the type of the value of the setting
*/
public interface SettingUpdater<T> {
@@ -216,17 +281,16 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
/**
* Returns the {@link Setting} for the given key or <code>null</code> if the setting can not be found.
*/
public Setting get(String key) {
public Setting<?> get(String key) {
Setting<?> setting = keySettings.get(key);
if (setting == null) {
for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
if (entry.getValue().match(key)) {
return entry.getValue();
}
}
} else {
if (setting != null) {
return setting;
}
for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
if (entry.getValue().match(key)) {
return entry.getValue();
}
}
return null;
}
@ -234,7 +298,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
* Returns <code>true</code> if the setting for the given key is dynamically updateable. Otherwise <code>false</code>.
*/
public boolean hasDynamicSetting(String key) {
final Setting setting = get(key);
final Setting<?> setting = get(key);
return setting != null && setting.isDynamic();
}
@ -253,4 +317,93 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
return builder.build();
}
/**
* Returns the value for the given setting.
*/
public <T> T get(Setting<T> setting) {
if (setting.getScope() != scope) {
throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] != [" + setting.getScope() + "]");
}
if (get(setting.getKey()) == null) {
throw new IllegalArgumentException("setting " + setting.getKey() + " has not been registered");
}
return setting.get(this.lastSettingsApplied, settings);
}
/**
* Updates a target settings builder with new, updated or deleted settings from a given settings builder.
* <p>
* Note: This method will only allow updates to dynamic settings. If a non-dynamic setting is updated, an {@link IllegalArgumentException} is thrown instead.
* </p>
* @param toApply the new settings to apply
* @param target the target settings builder that the updates are applied to. All keys that have explicit null value in toApply will be removed from this builder
* @param updates a settings builder that holds all updates applied to target
* @param type a free text string to allow better exceptions messages
* @return <code>true</code> if the target has changed otherwise <code>false</code>
*/
public boolean updateDynamicSettings(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
return updateSettings(toApply, target, updates, type, true);
}
/**
* Updates a target settings builder with new, updated or deleted settings from a given settings builder.
* @param toApply the new settings to apply
* @param target the target settings builder that the updates are applied to. All keys that have explicit null value in toApply will be removed from this builder
* @param updates a settings builder that holds all updates applied to target
* @param type a free text string to allow better exceptions messages
* @return <code>true</code> if the target has changed otherwise <code>false</code>
*/
public boolean updateSettings(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
return updateSettings(toApply, target, updates, type, false);
}
/**
* Updates a target settings builder with new, updated or deleted settings from a given settings builder.
* @param toApply the new settings to apply
* @param target the target settings builder that the updates are applied to. All keys that have explicit null value in toApply will be removed from this builder
* @param updates a settings builder that holds all updates applied to target
* @param type a free text string to allow better exceptions messages
* @param onlyDynamic if <code>false</code> all settings are updated, otherwise only dynamic settings are updated. If set to <code>true</code> and a non-dynamic setting is updated, an exception is thrown.
* @return <code>true</code> if the target has changed otherwise <code>false</code>
*/
private boolean updateSettings(Settings toApply, Settings.Builder target, Settings.Builder updates, String type, boolean onlyDynamic) {
boolean changed = false;
final Set<String> toRemove = new HashSet<>();
Settings.Builder settingsBuilder = Settings.settingsBuilder();
for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
if (entry.getValue() == null) {
toRemove.add(entry.getKey());
} else if ((onlyDynamic == false && get(entry.getKey()) != null) || hasDynamicSetting(entry.getKey())) {
validate(entry.getKey(), toApply);
settingsBuilder.put(entry.getKey(), entry.getValue());
updates.put(entry.getKey(), entry.getValue());
changed = true;
} else {
throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
}
}
changed |= applyDeletes(toRemove, target);
target.put(settingsBuilder.build());
return changed;
}
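A sketch of driving the update methods above, reusing the hypothetical my.plugin.max setting from the earlier sketch and assuming it is registered as dynamic:
Settings.Builder target = Settings.builder(); // e.g. the current persistent settings
Settings.Builder updates = Settings.builder();
Settings toApply = Settings.builder().put("my.plugin.max", 42).build();
boolean changed = clusterSettings.updateDynamicSettings(toApply, target, updates, "persistent");
// a registered but non-dynamic key in toApply would throw instead:
// "persistent setting [<key>], not dynamically updateable"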
private static final boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
boolean changed = false;
for (String entry : deletes) {
Set<String> keysToRemove = new HashSet<>();
Set<String> keySet = builder.internalMap().keySet();
for (String key : keySet) {
if (Regex.simpleMatch(entry, key)) {
keysToRemove.add(key);
}
}
for (String key : keysToRemove) {
builder.remove(key);
changed = true;
}
}
return changed;
}
}

View File

@ -20,6 +20,7 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.metadata.MetaData;
@ -35,13 +36,32 @@ import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAl
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.env.Environment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.analysis.HunspellService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.node.Node;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.repositories.uri.URLRepository;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
@ -62,7 +82,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
super(settings, settingsSet, Setting.Scope.CLUSTER);
}
@Override
public synchronized Settings applySettings(Settings newSettings) {
Settings settings = super.applySettings(newSettings);
@ -83,6 +102,11 @@ public final class ClusterSettings extends AbstractScopedSettings {
return settings;
}
@Override
public boolean hasDynamicSetting(String key) {
return isLoggerSetting(key) || super.hasDynamicSetting(key);
}
/**
* Returns <code>true</code> if the settings is a logger setting.
*/
@ -92,6 +116,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
@ -104,6 +131,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
FsRepository.REPOSITORIES_COMPRESS_SETTING,
FsRepository.REPOSITORIES_LOCATION_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
@ -133,6 +163,19 @@ public final class ClusterSettings extends AbstractScopedSettings {
DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
DiscoverySettings.COMMIT_TIMEOUT_SETTING,
DiscoverySettings.NO_MASTER_BLOCK_SETTING,
GatewayService.EXPECTED_DATA_NODES_SETTING,
GatewayService.EXPECTED_MASTER_NODES_SETTING,
GatewayService.EXPECTED_NODES_SETTING,
GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_TIME_SETTING,
NetworkModule.HTTP_ENABLED,
NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS,
NettyHttpServerTransport.SETTING_CORS_ENABLED,
NettyHttpServerTransport.SETTING_CORS_MAX_AGE,
NettyHttpServerTransport.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
NettyHttpServerTransport.SETTING_PIPELINING,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
@ -149,5 +192,65 @@ public final class ClusterSettings extends AbstractScopedSettings {
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_PROFILES_SETTING,
Transport.TRANSPORT_TCP_COMPRESS)));
Transport.TRANSPORT_TCP_COMPRESS,
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
NetworkService.TcpSettings.TCP_NO_DELAY,
NetworkService.TcpSettings.TCP_KEEP_ALIVE,
NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_BLOCKING,
NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
HunspellService.HUNSPELL_LAZY_LOAD,
HunspellService.HUNSPELL_IGNORE_CASE,
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
Environment.PATH_CONF_SETTING,
Environment.PATH_DATA_SETTING,
Environment.PATH_HOME_SETTING,
Environment.PATH_LOGS_SETTING,
Environment.PATH_PLUGINS_SETTING,
Environment.PATH_REPO_SETTING,
Environment.PATH_SCRIPTS_SETTING,
Environment.PATH_SHARED_DATA_SETTING,
Environment.PIDFILE_SETTING,
DiscoveryService.DISCOVERY_SEED_SETTING,
DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
DiscoveryModule.DISCOVERY_TYPE_SETTING,
DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
FaultDetection.PING_RETRIES_SETTING,
FaultDetection.PING_TIMEOUT_SETTING,
FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
FaultDetection.PING_INTERVAL_SETTING,
FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
ZenDiscovery.PING_TIMEOUT_SETTING,
ZenDiscovery.JOIN_TIMEOUT_SETTING,
ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
Node.WRITE_PORTS_FIELD_SETTING,
URLRepository.ALLOWED_URLS_SETTING,
URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
URLRepository.REPOSITORIES_URL_SETTING,
URLRepository.SUPPORTED_PROTOCOLS_SETTING)));
}

View File

@ -0,0 +1,168 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.settings;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexingSlowLog;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.SearchSlowLog;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.search.SearchService;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;
/**
* Encapsulates all valid index level settings.
* @see org.elasticsearch.common.settings.Setting.Scope#INDEX
*/
public final class IndexScopedSettings extends AbstractScopedSettings {
public static final Predicate<String> INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetaData.INDEX_SETTING_PREFIX);
public static Set<Setting<?>> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
IndexSettings.INDEX_TTL_DISABLE_PURGE_SETTING,
IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING,
IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
MergeSchedulerConfig.AUTO_THROTTLE_SETTING,
MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING,
MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING,
IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING,
IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING,
IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING,
IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING,
IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING,
IndexMetaData.INDEX_SHADOW_REPLICAS_SETTING,
IndexMetaData.INDEX_SHARED_FILESYSTEM_SETTING,
IndexMetaData.INDEX_READ_ONLY_SETTING,
IndexMetaData.INDEX_BLOCKS_READ_SETTING,
IndexMetaData.INDEX_BLOCKS_WRITE_SETTING,
IndexMetaData.INDEX_BLOCKS_METADATA_SETTING,
IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING,
IndexMetaData.INDEX_PRIORITY_SETTING,
IndexMetaData.INDEX_DATA_PATH_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_REFORMAT,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING,
MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING,
IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING,
IndexSettings.INDEX_WARMER_ENABLED_SETTING,
IndexSettings.INDEX_REFRESH_INTERVAL_SETTING,
IndexSettings.MAX_RESULT_WINDOW_SETTING,
IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING,
IndexSettings.DEFAULT_FIELD_SETTING,
IndexSettings.QUERY_STRING_LENIENT_SETTING,
IndexSettings.ALLOW_UNMAPPED,
IndexSettings.INDEX_CHECK_ON_STARTUP,
ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING,
IndexSettings.INDEX_GC_DELETES_SETTING,
IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING,
UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING,
EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING,
EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING,
IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTTING,
IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY,
FieldMapper.IGNORE_MALFORMED_SETTING,
FieldMapper.COERCE_SETTING,
Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING,
PercolatorQueriesRegistry.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING,
MapperService.INDEX_MAPPER_DYNAMIC_SETTING,
MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING,
BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING,
IndexModule.INDEX_STORE_TYPE_SETTING,
IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING,
IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING,
PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING,
FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
EngineConfig.INDEX_CODEC_SETTING,
SearchService.INDEX_NORMS_LOADING_SETTING,
// this sucks but we can't really validate all the analyzers/similarity in here
Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX), // this allows similarity settings to be passed
Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed
)));
public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
public IndexScopedSettings(Settings settings, Set<Setting<?>> settingsSet) {
super(settings, settingsSet, Setting.Scope.INDEX);
}
private IndexScopedSettings(Settings settings, IndexScopedSettings other, IndexMetaData metaData) {
super(settings, metaData.getSettings(), other);
}
public IndexScopedSettings copy(Settings settings, IndexMetaData metaData) {
return new IndexScopedSettings(settings, this, metaData);
}
public boolean isPrivateSetting(String key) {
switch (key) {
case IndexMetaData.SETTING_CREATION_DATE:
case IndexMetaData.SETTING_INDEX_UUID:
case IndexMetaData.SETTING_VERSION_CREATED:
case IndexMetaData.SETTING_VERSION_UPGRADED:
case MergePolicyConfig.INDEX_MERGE_ENABLED:
return true;
default:
return false;
}
}
}
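A short sketch of validating index-level keys against this registry; the values are illustrative:
IndexScopedSettings indexScopedSettings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS;
indexScopedSettings.validate(Settings.builder().put("index.number_of_replicas", 2)); // ok: registered and parseable
indexScopedSettings.validate(Settings.builder().put("index.no_such_setting", "x")); // throws: unknown setting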

View File

@ -36,12 +36,16 @@ import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
* A setting. Encapsulates typical stuff like default value, parsing, and scope.
* Some (dynamic=true) can be modified at run time using the API.
*/
public class Setting<T> extends ToXContentToBytes {
private final String key;
@ -67,8 +71,21 @@ public class Setting<T> extends ToXContentToBytes {
this.scope = scope;
}
/**
* Creates a new Setting instance
* @param key the settings key for this setting.
* @param fallBackSetting a setting to fall back to if the current setting is not set.
* @param parser a parser that parses the string rep into a complex datatype.
* @param dynamic true iff this setting is dynamically updateable
* @param scope the scope of this setting
*/
public Setting(String key, Setting<T> fallBackSetting, Function<String, T> parser, boolean dynamic, Scope scope) {
this(key, fallBackSetting::getRaw, parser, dynamic, scope);
}
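A sketch of the fallback behavior, using the boolSetting factory defined further down; both keys are hypothetical:
Setting<Boolean> oldSetting = Setting.boolSetting("old.key", true, false, Setting.Scope.CLUSTER);
Setting<Boolean> newSetting = Setting.boolSetting("new.key", oldSetting, false, Setting.Scope.CLUSTER);
// new.key is absent, so newSetting falls back to whatever old.key resolves to
Settings settings = Settings.builder().put("old.key", false).build();
assert newSetting.get(settings) == false;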
/**
* Returns the settings key or a prefix if this setting is a group setting
*
* @see #isGroupSetting()
*/
public final String getKey() {
@ -103,13 +120,21 @@ public class Setting<T> extends ToXContentToBytes {
}
/**
* Returns the default values string representation for this setting.
* Returns the default value string representation for this setting.
* @param settings a settings object for settings that has a default value depending on another setting if available
*/
public final String getDefault(Settings settings) {
public final String getDefaultRaw(Settings settings) {
return defaultValue.apply(settings);
}
/**
* Returns the default value for this setting.
* @param settings a settings object for settings that has a default value depending on another setting if available
*/
public final T getDefault(Settings settings) {
return parser.apply(getDefaultRaw(settings));
}
/**
* Returns <code>true</code> iff this setting is present in the given settings object. Otherwise <code>false</code>
*/
@ -165,6 +190,16 @@ public class Setting<T> extends ToXContentToBytes {
return builder;
}
/**
* Returns the value for this setting but falls back to the second provided settings object
*/
public final T get(Settings primary, Settings secondary) {
if (exists(primary)) {
return get(primary);
}
return get(secondary);
}
/**
* The settings scope - settings can either be cluster settings or per index settings.
*/
@ -173,11 +208,18 @@ public class Setting<T> extends ToXContentToBytes {
INDEX;
}
final AbstractScopedSettings.SettingUpdater newUpdater(Consumer<T> consumer, ESLogger logger) {
/**
* Build a new updater with a noop validator.
*/
final AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger) {
return newUpdater(consumer, logger, (s) -> {});
}
AbstractScopedSettings.SettingUpdater newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
/**
* Build the updater responsible for validating new values, logging the new
* value, and eventually setting the value where it belongs.
*/
AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
if (isDynamic()) {
return new Updater(consumer, logger, validator);
} else {
@ -216,7 +258,7 @@ public class Setting<T> extends ToXContentToBytes {
}
private class Updater implements AbstractScopedSettings.SettingUpdater<T> {
private final class Updater implements AbstractScopedSettings.SettingUpdater<T> {
private final Consumer<T> consumer;
private final ESLogger logger;
private final Consumer<T> accept;
@ -256,8 +298,8 @@ public class Setting<T> extends ToXContentToBytes {
}
@Override
public void apply(T value, Settings current, Settings previous) {
logger.info("update [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current));
public final void apply(T value, Settings current, Settings previous) {
logger.info("updating [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current));
consumer.accept(value);
}
}
@ -285,6 +327,14 @@ public class Setting<T> extends ToXContentToBytes {
return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope);
}
public static Setting<Long> longSetting(String key, long defaultValue, long minValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), dynamic, scope);
}
public static Setting<String> simpleString(String key, boolean dynamic, Scope scope) {
return new Setting<>(key, "", Function.identity(), dynamic, scope);
}
public static int parseInt(String s, int minValue, String key) {
int value = Integer.parseInt(s);
if (value < minValue) {
@ -293,6 +343,14 @@ public class Setting<T> extends ToXContentToBytes {
return value;
}
public static long parseLong(String s, long minValue, String key) {
long value = Long.parseLong(s);
if (value < minValue) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
return value;
}
public static Setting<Integer> intSetting(String key, int defaultValue, boolean dynamic, Scope scope) {
return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope);
}
@ -301,6 +359,10 @@ public class Setting<T> extends ToXContentToBytes {
return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope);
}
public static Setting<Boolean> boolSetting(String key, Setting<Boolean> fallbackSetting, boolean dynamic, Scope scope) {
return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, dynamic, scope);
}
public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope);
}
@ -316,25 +378,15 @@ public class Setting<T> extends ToXContentToBytes {
public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope);
}
public static <T> Setting<List<T>> listSetting(String key, Setting<List<T>> fallbackSetting, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, dynamic, scope);
}
public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
Function<String, List<T>> parser = (s) -> {
try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(s)){
XContentParser.Token token = xContentParser.nextToken();
if (token != XContentParser.Token.START_ARRAY) {
throw new IllegalArgumentException("expected START_ARRAY but got " + token);
}
ArrayList<T> list = new ArrayList<>();
while ((token = xContentParser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token != XContentParser.Token.VALUE_STRING) {
throw new IllegalArgumentException("expected VALUE_STRING but got " + token);
}
list.add(singleValueParser.apply(xContentParser.text()));
}
return list;
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse array", e);
}
};
Function<String, List<T>> parser = (s) ->
parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList());
return new Setting<List<T>>(key, (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) {
private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?");
@Override
@ -343,6 +395,7 @@ public class Setting<T> extends ToXContentToBytes {
return array == null ? defaultValue.apply(settings) : arrayToParsableString(array);
}
@Override
public boolean match(String toTest) {
return pattern.matcher(toTest).matches();
}
@ -354,6 +407,26 @@ public class Setting<T> extends ToXContentToBytes {
};
}
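The anonymous subclass above also matches array-style keys via its pattern; a quick sketch with an illustrative key:
Setting<List<String>> names = Setting.listSetting("my.names", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
assert names.match("my.names"); // the plain key matches
assert names.match("my.names.0"); // ...and so does the index-suffixed form
assert names.match("my.names.zero") == false; // but only numeric suffixes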
private static List<String> parseableStringToList(String parsableString) {
try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(parsableString)) {
XContentParser.Token token = xContentParser.nextToken();
if (token != XContentParser.Token.START_ARRAY) {
throw new IllegalArgumentException("expected START_ARRAY but got " + token);
}
ArrayList<String> list = new ArrayList<>();
while ((token = xContentParser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token != XContentParser.Token.VALUE_STRING) {
throw new IllegalArgumentException("expected VALUE_STRING but got " + token);
}
list.add(xContentParser.text());
}
return list;
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse array", e);
}
}
private static String arrayToParsableString(String[] array) {
try {
XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent());
@ -420,6 +493,7 @@ public class Setting<T> extends ToXContentToBytes {
@Override
public void apply(Settings value, Settings current, Settings previous) {
logger.info("updating [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current));
consumer.accept(value);
}
@ -460,4 +534,16 @@ public class Setting<T> extends ToXContentToBytes {
}, dynamic, scope);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Setting<?> setting = (Setting<?>) o;
return Objects.equals(key, setting.key);
}
@Override
public int hashCode() {
return Objects.hash(key);
}
}

View File

@ -58,6 +58,7 @@ import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@ -212,6 +213,19 @@ public final class Settings implements ToXContent {
return builder.build();
}
/**
* Returns a new settings object that contains all settings of the current one, filtered by the given settings key predicate.
*/
public Settings filter(Predicate<String> predicate) {
Builder builder = new Builder();
for (Map.Entry<String, String> entry : getAsMap().entrySet()) {
if (predicate.test(entry.getKey())) {
builder.put(entry.getKey(), entry.getValue());
}
}
return builder.build();
}
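This predicate-based filter is what SettingsModule uses further down to split node settings into index-scoped keys and the rest; a sketch:
Settings indexLevel = settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE); // index.* keys only
Settings nodeLevel = settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate()); // everything else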
/**
* Returns the settings mapped to the given setting name.
*/

View File

@ -34,8 +34,8 @@ public class SettingsModule extends AbstractModule {
private final Settings settings;
private final SettingsFilter settingsFilter;
private final Map<String, Setting<?>> clusterDynamicSettings = new HashMap<>();
private final Map<String, Setting<?>> clusterSettings = new HashMap<>();
private final Map<String, Setting<?>> indexSettings = new HashMap<>();
public SettingsModule(Settings settings, SettingsFilter settingsFilter) {
this.settings = settings;
@ -43,26 +43,50 @@ public class SettingsModule extends AbstractModule {
for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) {
registerSetting(setting);
}
for (Setting<?> setting : IndexScopedSettings.BUILT_IN_INDEX_SETTINGS) {
registerSetting(setting);
}
}
@Override
protected void configure() {
final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values()));
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.clusterSettings.values()));
// by now we are fully configured, lets check node level settings for unregistered index settings
indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE));
// we can't call this method yet since not all node level settings are registered yet.
// still, we can validate the ones we have registered to make sure they don't have invalid values. this is
// better than nothing, progress over perfection, and we fail as soon as possible.
// clusterSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate()));
for (Map.Entry<String, String> entry : settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate()).getAsMap().entrySet()) {
if (clusterSettings.get(entry.getKey()) != null) {
clusterSettings.validate(entry.getKey(), settings);
} else if (AbstractScopedSettings.isValidKey(entry.getKey()) == false) {
throw new IllegalArgumentException("illegal settings key: [" + entry.getKey() + "]");
}
}
bind(Settings.class).toInstance(settings);
bind(SettingsFilter.class).toInstance(settingsFilter);
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(clusterDynamicSettings.values()));
bind(ClusterSettings.class).toInstance(clusterSettings);
bind(IndexScopedSettings.class).toInstance(indexScopedSettings);
}
public void registerSetting(Setting<?> setting) {
switch (setting.getScope()) {
case CLUSTER:
if (clusterDynamicSettings.containsKey(setting.getKey())) {
if (clusterSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
}
clusterDynamicSettings.put(setting.getKey(), setting);
clusterSettings.put(setting.getKey(), setting);
break;
case INDEX:
throw new UnsupportedOperationException("not yet implemented");
if (indexSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
}
indexSettings.put(setting.getKey(), setting);
break;
}
}
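A sketch of how a plugin or module might register its own setting here; the setting itself is hypothetical:
SettingsModule settingsModule = new SettingsModule(settings, settingsFilter);
settingsModule.registerSetting(Setting.boolSetting("my.plugin.enabled", false, false, Setting.Scope.CLUSTER));
// registering the same key twice throws: Cannot register setting [my.plugin.enabled] twice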

View File

@ -20,12 +20,10 @@
package org.elasticsearch.common.unit;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import java.io.IOException;
import java.util.Locale;
@ -176,7 +174,6 @@ public class ByteSizeValue implements Streamable {
public static ByteSizeValue parseBytesSizeValue(String sValue, ByteSizeValue defaultValue, String settingName) throws ElasticsearchParseException {
settingName = Objects.requireNonNull(settingName);
assert settingName.startsWith("index.") == false || MetaDataIndexUpgradeService.INDEX_BYTES_SIZE_SETTINGS.contains(settingName);
if (sValue == null) {
return defaultValue;
}

View File

@ -22,6 +22,7 @@ package org.elasticsearch.common.unit;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
@ -32,7 +33,7 @@ import java.io.IOException;
* the earth ellipsoid defined in {@link GeoUtils}. The default unit used within
* this project is <code>METERS</code> which is defined by <code>DEFAULT</code>
*/
public enum DistanceUnit {
public enum DistanceUnit implements Writeable<DistanceUnit> {
INCH(0.0254, "in", "inch"),
YARD(0.9144, "yd", "yards"),
FEET(0.3048, "ft", "feet"),
@ -322,4 +323,24 @@ public enum DistanceUnit {
return new Distance(Double.parseDouble(distance), defaultUnit);
}
}
private static final DistanceUnit PROTOTYPE = DEFAULT;
@Override
public DistanceUnit readFrom(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown DistanceUnit ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
public static DistanceUnit readUnitFrom(StreamInput in) throws IOException {
return PROTOTYPE.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(this.ordinal());
}
}
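A round-trip sketch for the ordinal-based serialization above, assuming the BytesStreamOutput/StreamInput helpers used elsewhere in this codebase:
BytesStreamOutput out = new BytesStreamOutput();
DistanceUnit.INCH.writeTo(out);
DistanceUnit unit = DistanceUnit.readUnitFrom(StreamInput.wrap(out.bytes()));
assert unit == DistanceUnit.INCH;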

View File

@ -67,6 +67,8 @@ public final class Fuzziness implements ToXContent, Writeable<Fuzziness> {
/**
* Creates a {@link Fuzziness} instance from an edit distance. The value must be one of <tt>[0, 1, 2]</tt>
*
* Note: Using this method only makes sense if the field you are applying Fuzziness to is some sort of string.
*/
public static Fuzziness fromEdits(int edits) {
return new Fuzziness(edits);

View File

@ -20,12 +20,10 @@
package org.elasticsearch.common.unit;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.joda.time.Period;
import org.joda.time.PeriodType;
import org.joda.time.format.PeriodFormat;
@ -254,7 +252,6 @@ public class TimeValue implements Streamable {
public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue, String settingName) {
settingName = Objects.requireNonNull(settingName);
assert settingName.startsWith("index.") == false || MetaDataIndexUpgradeService.INDEX_TIME_SETTINGS.contains(settingName) : settingName;
if (sValue == null) {
return defaultValue;
}

View File

@ -1,383 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.shard.ShardStateMetaData;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.FileStore;
import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
*/
public class MultiDataPathUpgrader {
private final NodeEnvironment nodeEnvironment;
private final ESLogger logger = Loggers.getLogger(getClass());
/**
* Creates a new upgrader instance
* @param nodeEnvironment the node env to operate on.
*
*/
public MultiDataPathUpgrader(NodeEnvironment nodeEnvironment) {
this.nodeEnvironment = nodeEnvironment;
}
/**
* Upgrades the given shard Id from multiple shard paths into the given target path.
*
* @see #pickShardPath(org.elasticsearch.index.shard.ShardId)
*/
public void upgrade(ShardId shard, ShardPath targetPath) throws IOException {
final Path[] paths = nodeEnvironment.availableShardPaths(shard); // custom data path doesn't need upgrading
if (isTargetPathConfigured(paths, targetPath) == false) {
throw new IllegalArgumentException("shard path must be one of the shards data paths");
}
assert needsUpgrading(shard) : "Should not upgrade a path that needs no upgrading";
logger.info("{} upgrading multi data dir to {}", shard, targetPath.getDataPath());
final ShardStateMetaData loaded = ShardStateMetaData.FORMAT.loadLatestState(logger, paths);
if (loaded == null) {
throw new IllegalStateException(shard + " no shard state found in any of: " + Arrays.toString(paths) + " please check and remove them if possible");
}
logger.info("{} loaded shard state {}", shard, loaded);
ShardStateMetaData.FORMAT.write(loaded, loaded.version, targetPath.getShardStatePath());
Files.createDirectories(targetPath.resolveIndex());
try (SimpleFSDirectory directory = new SimpleFSDirectory(targetPath.resolveIndex())) {
try (final Lock lock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
upgradeFiles(shard, targetPath, targetPath.resolveIndex(), ShardPath.INDEX_FOLDER_NAME, paths);
} catch (LockObtainFailedException ex) {
throw new IllegalStateException("Can't obtain lock on " + targetPath.resolveIndex(), ex);
}
}
upgradeFiles(shard, targetPath, targetPath.resolveTranslog(), ShardPath.TRANSLOG_FOLDER_NAME, paths);
logger.info("{} wipe upgraded directories", shard);
for (Path path : paths) {
if (path.equals(targetPath.getShardStatePath()) == false) {
logger.info("{} wipe shard directories: [{}]", shard, path);
IOUtils.rm(path);
}
}
if (FileSystemUtils.files(targetPath.resolveIndex()).length == 0) {
throw new IllegalStateException("index folder [" + targetPath.resolveIndex() + "] is empty");
}
if (FileSystemUtils.files(targetPath.resolveTranslog()).length == 0) {
throw new IllegalStateException("translog folder [" + targetPath.resolveTranslog() + "] is empty");
}
}
/**
* Runs check-index on the target shard and throws an exception if it failed
*/
public void checkIndex(ShardPath targetPath) throws IOException {
BytesStreamOutput os = new BytesStreamOutput();
PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
try (Directory directory = new SimpleFSDirectory(targetPath.resolveIndex());
final CheckIndex checkIndex = new CheckIndex(directory)) {
checkIndex.setInfoStream(out);
CheckIndex.Status status = checkIndex.checkIndex();
out.flush();
if (!status.clean) {
logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
throw new IllegalStateException("index check failure");
}
}
}
/**
* Returns true iff the given shard needs upgrading.
*/
public boolean needsUpgrading(ShardId shard) {
final Path[] paths = nodeEnvironment.availableShardPaths(shard);
// custom data paths don't need upgrading, and neither do single-path environments
if (paths.length > 1) {
int numPathsExist = 0;
for (Path path : paths) {
if (Files.exists(path.resolve(MetaDataStateFormat.STATE_DIR_NAME))) {
numPathsExist++;
if (numPathsExist > 1) {
return true;
}
}
}
}
return false;
}
/**
* Picks a target ShardPath to allocate and upgrade the given shard to. It picks the target based on a simple
* heuristic:
* <ul>
* <li>if the smallest data path has at least 2x the shard's total size available, the data path holding the most bytes of that shard is picked to minimize the amount of bytes to copy</li>
* <li>otherwise the largest available data path is used as the target, no matter how big a slice of the shard it already holds.</li>
* </ul>
*/
public ShardPath pickShardPath(ShardId shard) throws IOException {
if (needsUpgrading(shard) == false) {
throw new IllegalStateException("Shard doesn't need upgrading");
}
final NodeEnvironment.NodePath[] paths = nodeEnvironment.nodePaths();
// if we need upgrading make sure we have all paths.
for (NodeEnvironment.NodePath path : paths) {
Files.createDirectories(path.resolve(shard));
}
final ShardFileInfo[] shardFileInfo = getShardFileInfo(shard, paths);
long totalBytesUsedByShard = 0;
long leastUsableSpace = Long.MAX_VALUE;
long mostUsableSpace = Long.MIN_VALUE;
assert shardFileInfo.length == nodeEnvironment.availableShardPaths(shard).length;
for (ShardFileInfo info : shardFileInfo) {
totalBytesUsedByShard += info.spaceUsedByShard;
leastUsableSpace = Math.min(leastUsableSpace, info.usableSpace + info.spaceUsedByShard);
mostUsableSpace = Math.max(mostUsableSpace, info.usableSpace + info.spaceUsedByShard);
}
if (mostUsableSpace < totalBytesUsedByShard) {
throw new IllegalStateException("Can't upgrade path available space: " + new ByteSizeValue(mostUsableSpace) + " required space: " + new ByteSizeValue(totalBytesUsedByShard));
}
ShardFileInfo target = shardFileInfo[0];
if (leastUsableSpace >= (2 * totalBytesUsedByShard)) {
for (ShardFileInfo info : shardFileInfo) {
if (info.spaceUsedByShard > target.spaceUsedByShard) {
target = info;
}
}
} else {
for (ShardFileInfo info : shardFileInfo) {
if (info.usableSpace > target.usableSpace) {
target = info;
}
}
}
return new ShardPath(false, target.path, target.path, IndexMetaData.INDEX_UUID_NA_VALUE /* we don't know */, shard);
}
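To make the heuristic concrete (numbers invented): if a shard occupies 10gb in total and even the smallest data path could still hold at least 20gb of it, the 2x condition is met and the path already holding the largest slice of the shard wins, minimizing copying; if the smallest path fell short of that, the path with the most usable space would win instead, regardless of how little of the shard it currently holds.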
private ShardFileInfo[] getShardFileInfo(ShardId shard, NodeEnvironment.NodePath[] paths) throws IOException {
final ShardFileInfo[] info = new ShardFileInfo[paths.length];
for (int i = 0; i < info.length; i++) {
Path path = paths[i].resolve(shard);
final long usableSpace = getUsableSpace(paths[i]);
info[i] = new ShardFileInfo(path, usableSpace, getSpaceUsedByShard(path));
}
return info;
}
protected long getSpaceUsedByShard(Path path) throws IOException {
final long[] spaceUsedByShard = new long[] {0};
if (Files.exists(path)) {
Files.walkFileTree(path, new FileVisitor<Path>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
if (attrs.isRegularFile()) {
spaceUsedByShard[0] += attrs.size();
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
return FileVisitResult.CONTINUE;
}
});
}
return spaceUsedByShard[0];
}
protected long getUsableSpace(NodeEnvironment.NodePath path) throws IOException {
FileStore fileStore = path.fileStore;
return fileStore.getUsableSpace();
}
static class ShardFileInfo {
final Path path;
final long usableSpace;
final long spaceUsedByShard;
ShardFileInfo(Path path, long usableSpace, long spaceUsedByShard) {
this.path = path;
this.usableSpace = usableSpace;
this.spaceUsedByShard = spaceUsedByShard;
}
}
private void upgradeFiles(ShardId shard, ShardPath targetPath, final Path targetDir, String folderName, Path[] paths) throws IOException {
List<Path> movedFiles = new ArrayList<>();
for (Path path : paths) {
if (path.equals(targetPath.getDataPath()) == false) {
final Path sourceDir = path.resolve(folderName);
if (Files.exists(sourceDir)) {
logger.info("{} upgrading [{}] from [{}] to [{}]", shard, folderName, sourceDir, targetDir);
try (DirectoryStream<Path> stream = Files.newDirectoryStream(sourceDir)) {
Files.createDirectories(targetDir);
for (Path file : stream) {
if (IndexWriter.WRITE_LOCK_NAME.equals(file.getFileName().toString()) || Files.isDirectory(file)) {
continue; // skip write.lock
}
logger.info("{} move file [{}] size: [{}]", shard, file.getFileName(), Files.size(file));
final Path targetFile = targetDir.resolve(file.getFileName());
/* We are pessimistic and do a copy first to the other path and then an atomic move to rename it such that
in the worst case the file exists twice but is never lost or half written.*/
final Path targetTempFile = Files.createTempFile(targetDir, "upgrade_", "_" + file.getFileName().toString());
Files.copy(file, targetTempFile, StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.REPLACE_EXISTING);
Files.move(targetTempFile, targetFile, StandardCopyOption.ATOMIC_MOVE); // we are on the same FS - this must work otherwise all bets are off
Files.delete(file);
movedFiles.add(targetFile);
}
}
}
}
}
if (movedFiles.isEmpty() == false) {
// fsync later it might be on disk already
logger.info("{} fsync files", shard);
for (Path moved : movedFiles) {
logger.info("{} syncing [{}]", shard, moved.getFileName());
IOUtils.fsync(moved, false);
}
logger.info("{} syncing directory [{}]", shard, targetDir);
IOUtils.fsync(targetDir, true);
}
}
/**
* Returns <code>true</code> iff the target path is one of the given paths.
*/
private boolean isTargetPathConfigured(final Path[] paths, ShardPath targetPath) {
for (Path path : paths) {
if (path.equals(targetPath.getDataPath())) {
return true;
}
}
return false;
}
/**
* Runs an upgrade on all shards located under the given node environment if there is more than 1 data.path configured,
* otherwise this method returns immediately.
*/
public static void upgradeMultiDataPath(NodeEnvironment nodeEnv, ESLogger logger) throws IOException {
if (nodeEnv.nodeDataPaths().length > 1) {
final MultiDataPathUpgrader upgrader = new MultiDataPathUpgrader(nodeEnv);
final Set<String> allIndices = nodeEnv.findAllIndices();
for (String index : allIndices) {
for (ShardId shardId : findAllShardIds(nodeEnv.indexPaths(new Index(index)))) {
try (ShardLock lock = nodeEnv.shardLock(shardId, 0)) {
if (upgrader.needsUpgrading(shardId)) {
final ShardPath shardPath = upgrader.pickShardPath(shardId);
upgrader.upgrade(shardId, shardPath);
// we have to check if the index path exists since we might
// have only upgraded the shard state that is written under /indexname/shardid/_state
// in the case we upgraded a dedicated index directory index
if (Files.exists(shardPath.resolveIndex())) {
upgrader.checkIndex(shardPath);
}
} else {
logger.debug("{} no upgrade needed - already upgraded");
}
}
}
}
}
}
private static Set<ShardId> findAllShardIds(Path... locations) throws IOException {
final Set<ShardId> shardIds = new HashSet<>();
for (final Path location : locations) {
if (Files.isDirectory(location)) {
shardIds.addAll(findAllShardsForIndex(location));
}
}
return shardIds;
}
private static Set<ShardId> findAllShardsForIndex(Path indexPath) throws IOException {
Set<ShardId> shardIds = new HashSet<>();
if (Files.isDirectory(indexPath)) {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
String currentIndex = indexPath.getFileName().toString();
for (Path shardPath : stream) {
String fileName = shardPath.getFileName().toString();
if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
int shardId = Integer.parseInt(fileName);
ShardId id = new ShardId(currentIndex, shardId);
shardIds.add(id);
}
}
}
}
return shardIds;
}
}

Some files were not shown because too many files have changed in this diff