Merge branch 'master' into trash_context_and_headers

commit 3d0cedbabb
@@ -57,8 +57,7 @@ class ClusterConfiguration {
     @Input
     Closure waitCondition = { NodeInfo node, AntBuilder ant ->
         File tmpFile = new File(node.cwd, 'wait.success')
-        ant.echo(message: "[${LocalDateTime.now()}] Waiting for elasticsearch node", level: "info")
-        ant.get(src: "http://${node.httpUri()}",
+        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}",
                 dest: tmpFile.toString(),
                 ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
                 retries: 10)
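The new wait condition above blocks the build on the cluster health endpoint, which itself waits until the expected number of nodes has joined. A minimal standalone Java sketch of the same polling idea (host, port, and retry counts here are illustrative, not taken from the build):

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

public class WaitForCluster {
    // Polls /_cluster/health?wait_for_nodes=N until it returns 200 or retries run out.
    public static boolean waitForNodes(String httpUri, int numNodes, int retries) throws IOException, InterruptedException {
        URL url = new URL("http://" + httpUri + "/_cluster/health?wait_for_nodes=" + numNodes);
        for (int i = 0; i < retries; i++) {
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setConnectTimeout(1000);
            conn.setReadTimeout(30_000); // the health API itself blocks until the node count is reached
            try {
                if (conn.getResponseCode() == 200) {
                    return true; // cluster reports the expected number of nodes
                }
            } catch (IOException e) {
                // node not up yet; fall through and retry
            } finally {
                conn.disconnect();
            }
            Thread.sleep(500);
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        // Assumed local test node; the address is illustrative only.
        System.out.println(waitForNodes("localhost:9200", 2, 10));
    }
}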
@@ -46,9 +46,9 @@ class ClusterFormationTasks {
     /**
      * Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
      *
-     * Returns an object that will resolve at execution time of the given task to a uri for the cluster.
+     * Returns a NodeInfo object for the first node in the cluster.
      */
-    static Object setup(Project project, Task task, ClusterConfiguration config) {
+    static NodeInfo setup(Project project, Task task, ClusterConfiguration config) {
         if (task.getEnabled() == false) {
             // no need to add cluster formation tasks if the task won't run!
             return
@@ -66,7 +66,7 @@ class ClusterFormationTasks {
         task.dependsOn(wait)

         // delay the resolution of the uri by wrapping in a closure, so it is not used until read for tests
-        return "${-> nodes[0].transportUri()}"
+        return nodes[0]
     }

     /** Adds a dependency on the given distribution */
@@ -20,7 +20,6 @@ package org.elasticsearch.gradle.test

 import com.carrotsearch.gradle.junit4.RandomizedTestingTask
 import org.elasticsearch.gradle.BuildPlugin
-import org.gradle.api.GradleException
 import org.gradle.api.Task
 import org.gradle.api.internal.tasks.options.Option
 import org.gradle.api.plugins.JavaBasePlugin
@@ -61,8 +60,12 @@ public class RestIntegTestTask extends RandomizedTestingTask {
         // this must run after all projects have been configured, so we know any project
         // references can be accessed as a fully configured
         project.gradle.projectsEvaluated {
-            Object clusterUri = ClusterFormationTasks.setup(project, this, clusterConfig)
-            systemProperty('tests.cluster', clusterUri)
+            NodeInfo node = ClusterFormationTasks.setup(project, this, clusterConfig)
+            systemProperty('tests.rest.cluster', "${-> node.httpUri()}")
+            // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
+            // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
+            // both as separate sysprops
+            systemProperty('tests.cluster', "${-> node.transportUri()}")
         }
     }
@@ -200,7 +200,7 @@ public class ActionModule extends AbstractModule {
     private final Map<String, ActionEntry> actions = new HashMap<>();
     private final List<Class<? extends ActionFilter>> actionFilters = new ArrayList<>();

-    static class ActionEntry<Request extends ActionRequest, Response extends ActionResponse> {
+    static class ActionEntry<Request extends ActionRequest<Request>, Response extends ActionResponse> {
        public final GenericAction<Request, Response> action;
        public final Class<? extends TransportAction<Request, Response>> transportAction;
        public final Class[] supportTransportActions;
@@ -229,7 +229,7 @@ public class ActionModule extends AbstractModule {
      * @param <Request> The request type.
      * @param <Response> The response type.
      */
-    public <Request extends ActionRequest, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
+    public <Request extends ActionRequest<Request>, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
         actions.put(action.name(), new ActionEntry<>(action, transportAction, supportTransportActions));
     }

@@ -28,10 +28,13 @@ import java.io.IOException;
 /**
  *
  */
-public abstract class ActionRequest<T extends ActionRequest> extends TransportRequest {
+public abstract class ActionRequest<Request extends ActionRequest<Request>> extends TransportRequest {

     public ActionRequest() {
         super();
         // this does not set the listenerThreaded API, if needed, its up to the caller to set it
         // since most times, we actually want it to not be threaded...
         // this.listenerThreaded = request.listenerThreaded();
     }

     public abstract ActionRequestValidationException validate();
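The recurring change in this commit is visible here: the loosely bound ActionRequest<T extends ActionRequest> becomes the fully self-referential ActionRequest<Request extends ActionRequest<Request>>. The payoff of the pattern is that fluent setters declared once in a base class can return the concrete subtype. A self-contained sketch of the pattern with illustrative names (not the Elasticsearch classes):

// Self-referential ("curiously recurring") generics: the base class knows
// the concrete subtype, so fluent setters can return it without casts at call sites.
abstract class Request<R extends Request<R>> {
    private long timeoutMillis;

    @SuppressWarnings("unchecked") // safe as long as subclasses bind R to themselves
    public final R timeout(long millis) {
        this.timeoutMillis = millis;
        return (R) this;
    }

    public final long timeout() {
        return timeoutMillis;
    }
}

final class SearchLikeRequest extends Request<SearchLikeRequest> {
    private String index;

    public SearchLikeRequest index(String index) {
        this.index = index;
        return this;
    }

    public String index() {
        return index;
    }
}

class Demo {
    public static void main(String[] args) {
        // Chaining works in either order because timeout() returns SearchLikeRequest.
        SearchLikeRequest r = new SearchLikeRequest().timeout(30_000).index("logs");
        System.out.println(r.index() + " " + r.timeout());
    }
}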
@@ -22,14 +22,9 @@ package org.elasticsearch.action.admin.cluster.settings;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;

-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
 import static org.elasticsearch.cluster.ClusterState.builder;

 /**
@@ -57,11 +52,11 @@ final class SettingsUpdater {
         boolean changed = false;
         Settings.Builder transientSettings = Settings.settingsBuilder();
         transientSettings.put(currentState.metaData().transientSettings());
-        changed |= apply(transientToApply, transientSettings, transientUpdates, "transient");
+        changed |= clusterSettings.updateDynamicSettings(transientToApply, transientSettings, transientUpdates, "transient");

         Settings.Builder persistentSettings = Settings.settingsBuilder();
         persistentSettings.put(currentState.metaData().persistentSettings());
-        changed |= apply(persistentToApply, persistentSettings, persistentUpdates, "persistent");
+        changed |= clusterSettings.updateDynamicSettings(persistentToApply, persistentSettings, persistentUpdates, "persistent");

         if (!changed) {
             return currentState;
@@ -86,42 +81,5 @@ final class SettingsUpdater {
         return build;
     }

-    private boolean apply(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
-        boolean changed = false;
-        final Set<String> toRemove = new HashSet<>();
-        Settings.Builder settingsBuilder = Settings.settingsBuilder();
-        for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
-            if (entry.getValue() == null) {
-                toRemove.add(entry.getKey());
-            } else if (clusterSettings.isLoggerSetting(entry.getKey()) || clusterSettings.hasDynamicSetting(entry.getKey())) {
-                settingsBuilder.put(entry.getKey(), entry.getValue());
-                updates.put(entry.getKey(), entry.getValue());
-                changed = true;
-            } else {
-                throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
-            }
-
-        }
-        changed |= applyDeletes(toRemove, target);
-        target.put(settingsBuilder.build());
-        return changed;
-    }
-
-    private final boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
-        boolean changed = false;
-        for (String entry : deletes) {
-            Set<String> keysToRemove = new HashSet<>();
-            Set<String> keySet = builder.internalMap().keySet();
-            for (String key : keySet) {
-                if (Regex.simpleMatch(entry, key)) {
-                    keysToRemove.add(key);
-                }
-            }
-            for (String key : keysToRemove) {
-                builder.remove(key);
-                changed = true;
-            }
-        }
-        return changed;
-    }
 }
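The deleted apply/applyDeletes pair handled wildcard deletes (a null value removes every key matching the pattern) alongside the dynamic-updateable check; that logic now lives behind ClusterSettings.updateDynamicSettings. A simplified sketch of the removed wildcard-removal idea, using a plain map in place of Settings.Builder:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;

class WildcardSettingsRemover {
    // Translates a simple wildcard ("logger.*") into a regex, similar in spirit to Regex.simpleMatch.
    static boolean simpleMatch(String pattern, String key) {
        return Pattern.matches(pattern.replace(".", "\\.").replace("*", ".*"), key);
    }

    // Removes every key matching any delete pattern; returns true if anything changed.
    static boolean applyDeletes(Set<String> deletes, Map<String, String> settings) {
        boolean changed = false;
        for (String pattern : deletes) {
            Set<String> keysToRemove = new HashSet<>();
            for (String key : settings.keySet()) {
                if (simpleMatch(pattern, key)) {
                    keysToRemove.add(key);
                }
            }
            for (String key : keysToRemove) {
                settings.remove(key);
                changed = true;
            }
        }
        return changed;
    }

    public static void main(String[] args) {
        Map<String, String> settings = new HashMap<>();
        settings.put("logger.action", "DEBUG");
        settings.put("cluster.routing.allocation.enable", "none");
        System.out.println(applyDeletes(Set.of("logger.*"), settings)); // true
        System.out.println(settings); // only the cluster.* key remains
    }
}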
@@ -179,6 +179,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
             this.failures = failures;
         }

+        @Override
         public FailedNodeException[] failures() {
             return failures;
         }
@@ -35,7 +35,7 @@ public class GetAliasesRequest extends MasterNodeReadRequest<GetAliasesRequest>
     private String[] indices = Strings.EMPTY_ARRAY;
     private String[] aliases = Strings.EMPTY_ARRAY;

-    private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();
+    private IndicesOptions indicesOptions = IndicesOptions.strictExpand();

     public GetAliasesRequest(String[] aliases) {
         this.aliases = aliases;
@@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.indices.refresh;
 import org.elasticsearch.action.ReplicationResponse;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.replication.ReplicationRequest;
+import org.elasticsearch.action.support.replication.BasicReplicationRequest;
 import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -37,7 +37,7 @@ import java.util.List;
 /**
  * Refresh action.
  */
-public class TransportRefreshAction extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, ReplicationRequest, ReplicationResponse> {
+public class TransportRefreshAction extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, BasicReplicationRequest, ReplicationResponse> {

     @Inject
     public TransportRefreshAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
@@ -53,8 +53,8 @@ public class TransportRefreshAction extends TransportBroadcastReplicationAction<
     }

     @Override
-    protected ReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
-        return new ReplicationRequest(shardId);
+    protected BasicReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
+        return new BasicReplicationRequest(shardId);
     }

     @Override
@@ -21,7 +21,7 @@ package org.elasticsearch.action.admin.indices.refresh;

 import org.elasticsearch.action.ReplicationResponse;
 import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.replication.ReplicationRequest;
+import org.elasticsearch.action.support.replication.BasicReplicationRequest;
 import org.elasticsearch.action.support.replication.TransportReplicationAction;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
@@ -41,7 +41,7 @@ import org.elasticsearch.transport.TransportService;
 /**
  *
  */
-public class TransportShardRefreshAction extends TransportReplicationAction<ReplicationRequest, ReplicationRequest, ReplicationResponse> {
+public class TransportShardRefreshAction extends TransportReplicationAction<BasicReplicationRequest, BasicReplicationRequest, ReplicationResponse> {

     public static final String NAME = RefreshAction.NAME + "[s]";

@@ -51,7 +51,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
                                       MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
                                       IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
-                actionFilters, indexNameExpressionResolver, ReplicationRequest::new, ReplicationRequest::new, ThreadPool.Names.REFRESH);
+                actionFilters, indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
     }

     @Override
@@ -60,7 +60,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
     }

     @Override
-    protected Tuple<ReplicationResponse, ReplicationRequest> shardOperationOnPrimary(MetaData metaData, ReplicationRequest shardRequest) throws Throwable {
+    protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) throws Throwable {
         IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed on primary", indexShard.shardId());
@@ -68,7 +68,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
     }

     @Override
-    protected void shardOperationOnReplica(ReplicationRequest request) {
+    protected void shardOperationOnReplica(BasicReplicationRequest request) {
         final ShardId shardId = request.shardId();
         IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
         indexShard.refresh("api");
@@ -62,7 +62,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
         if (globalBlock != null) {
             return globalBlock;
         }
-        if (request.settings().getAsMap().size() == 1 && (request.settings().get(IndexMetaData.SETTING_BLOCKS_METADATA) != null || request.settings().get(IndexMetaData.SETTING_READ_ONLY) != null )) {
+        if (request.settings().getAsMap().size() == 1 && IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings()) || IndexMetaData.INDEX_READ_ONLY_SETTING.exists(request.settings())) {
             return null;
         }
         return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
@@ -25,9 +25,11 @@ import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -38,13 +40,15 @@ import org.elasticsearch.transport.TransportService;
 public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<PutIndexTemplateRequest, PutIndexTemplateResponse> {

     private final MetaDataIndexTemplateService indexTemplateService;
+    private final IndexScopedSettings indexScopedSettings;

     @Inject
     public TransportPutIndexTemplateAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                            ThreadPool threadPool, MetaDataIndexTemplateService indexTemplateService,
-                                           ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+                                           ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexScopedSettings indexScopedSettings) {
         super(settings, PutIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutIndexTemplateRequest::new);
         this.indexTemplateService = indexTemplateService;
+        this.indexScopedSettings = indexScopedSettings;
     }

     @Override
@@ -69,11 +73,13 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
         if (cause.length() == 0) {
             cause = "api";
         }
-
+        final Settings.Builder templateSettingsBuilder = Settings.settingsBuilder();
+        templateSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
+        indexScopedSettings.validate(templateSettingsBuilder);
         indexTemplateService.putTemplate(new MetaDataIndexTemplateService.PutRequest(cause, request.name())
                 .template(request.template())
                 .order(request.order())
-                .settings(request.settings())
+                .settings(templateSettingsBuilder.build())
                 .mappings(request.mappings())
                 .aliases(request.aliases())
                 .customs(request.customs())
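The added lines normalize template settings onto the index. prefix and validate them before the template is stored, so an invalid setting now fails at PUT-template time rather than at index creation. A rough stand-in for the normalization step (a simplified sketch, not the actual Settings.Builder#normalizePrefix implementation):

import java.util.HashMap;
import java.util.Map;

class TemplateSettingsNormalizer {
    // Rewrites keys so both "number_of_shards" and "index.number_of_shards" end up
    // under the canonical "index." prefix, mirroring what the action does with
    // normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).
    static Map<String, String> normalizePrefix(Map<String, String> settings, String prefix) {
        Map<String, String> normalized = new HashMap<>();
        for (Map.Entry<String, String> e : settings.entrySet()) {
            String key = e.getKey().startsWith(prefix) ? e.getKey() : prefix + e.getKey();
            normalized.put(key, e.getValue());
        }
        return normalized;
    }

    public static void main(String[] args) {
        Map<String, String> raw = Map.of("number_of_shards", "3", "index.refresh_interval", "5s");
        // Prints both keys under the "index." prefix.
        System.out.println(normalizePrefix(raw, "index."));
    }
}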
@@ -179,7 +179,7 @@ public class BulkProcessor implements Closeable {


     private final ScheduledThreadPoolExecutor scheduler;
-    private final ScheduledFuture scheduledFuture;
+    private final ScheduledFuture<?> scheduledFuture;

     private final AtomicLong executionIdGen = new AtomicLong();

@@ -250,24 +250,24 @@ public class BulkProcessor implements Closeable {
      * (for example, if no id is provided, one will be generated, or usage of the create flag).
      */
     public BulkProcessor add(IndexRequest request) {
-        return add((ActionRequest) request);
+        return add((ActionRequest<?>) request);
     }

     /**
      * Adds an {@link DeleteRequest} to the list of actions to execute.
      */
     public BulkProcessor add(DeleteRequest request) {
-        return add((ActionRequest) request);
+        return add((ActionRequest<?>) request);
     }

     /**
      * Adds either a delete or an index request.
      */
-    public BulkProcessor add(ActionRequest request) {
+    public BulkProcessor add(ActionRequest<?> request) {
         return add(request, null);
     }

-    public BulkProcessor add(ActionRequest request, @Nullable Object payload) {
+    public BulkProcessor add(ActionRequest<?> request, @Nullable Object payload) {
         internalAdd(request, payload);
         return this;
     }
@@ -282,7 +282,7 @@ public class BulkProcessor implements Closeable {
         }
     }

-    private synchronized void internalAdd(ActionRequest request, @Nullable Object payload) {
+    private synchronized void internalAdd(ActionRequest<?> request, @Nullable Object payload) {
         ensureOpen();
         bulkRequest.add(request, payload);
         executeIfNeeded();
@@ -56,7 +56,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite

     private static final int REQUEST_OVERHEAD = 50;

-    final List<ActionRequest> requests = new ArrayList<>();
+    final List<ActionRequest<?>> requests = new ArrayList<>();
     List<Object> payloads = null;

     protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT;
@@ -71,14 +71,14 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
     /**
      * Adds a list of requests to be executed. Either index or delete requests.
      */
-    public BulkRequest add(ActionRequest... requests) {
-        for (ActionRequest request : requests) {
+    public BulkRequest add(ActionRequest<?>... requests) {
+        for (ActionRequest<?> request : requests) {
             add(request, null);
         }
         return this;
     }

-    public BulkRequest add(ActionRequest request) {
+    public BulkRequest add(ActionRequest<?> request) {
         return add(request, null);
     }

@@ -88,7 +88,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
      * @param payload Optional payload
      * @return the current bulk request
      */
-    public BulkRequest add(ActionRequest request, @Nullable Object payload) {
+    public BulkRequest add(ActionRequest<?> request, @Nullable Object payload) {
         if (request instanceof IndexRequest) {
             add((IndexRequest) request, payload);
         } else if (request instanceof DeleteRequest) {
@@ -104,8 +104,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
     /**
      * Adds a list of requests to be executed. Either index or delete requests.
      */
-    public BulkRequest add(Iterable<ActionRequest> requests) {
-        for (ActionRequest request : requests) {
+    public BulkRequest add(Iterable<ActionRequest<?>> requests) {
+        for (ActionRequest<?> request : requests) {
             add(request);
         }
         return this;
@@ -188,15 +188,14 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
     /**
      * The list of requests in this bulk request.
      */
-    public List<ActionRequest> requests() {
+    public List<ActionRequest<?>> requests() {
         return this.requests;
     }

     @Override
-    @SuppressWarnings("unchecked")
     public List<? extends IndicesRequest> subRequests() {
         List<IndicesRequest> indicesRequests = new ArrayList<>();
-        for (ActionRequest request : requests) {
+        for (ActionRequest<?> request : requests) {
             assert request instanceof IndicesRequest;
             indicesRequests.add((IndicesRequest) request);
         }
@@ -478,7 +477,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
         if (requests.isEmpty()) {
             validationException = addValidationError("no requests added", validationException);
         }
-        for (ActionRequest request : requests) {
+        for (ActionRequest<?> request : requests) {
             // We first check if refresh has been set
             if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) ||
                     (request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) ||
@@ -527,7 +526,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
         super.writeTo(out);
         out.writeByte(consistencyLevel.id());
         out.writeVInt(requests.size());
-        for (ActionRequest request : requests) {
+        for (ActionRequest<?> request : requests) {
             if (request instanceof IndexRequest) {
                 out.writeByte((byte) 0);
             } else if (request instanceof DeleteRequest) {
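Throughout BulkProcessor and BulkRequest the raw ActionRequest becomes the wildcard ActionRequest<?>: the list still accepts any concrete request, but stays inside the generics system, which is why the @SuppressWarnings("unchecked") on subRequests() could be dropped. A small sketch of the difference (toy types, not the Elasticsearch hierarchy):

import java.util.ArrayList;
import java.util.List;

class RawVsWildcard {
    static abstract class Req<R extends Req<R>> { }
    static final class IndexReq extends Req<IndexReq> { }
    static final class DeleteReq extends Req<DeleteReq> { }

    public static void main(String[] args) {
        // Raw type: compiles, but every use is unchecked and needs suppression.
        @SuppressWarnings({"rawtypes", "unchecked"})
        List<Req> raw = new ArrayList<>();
        raw.add(new IndexReq());

        // Wildcard: still holds any request, with no unchecked warnings.
        List<Req<?>> requests = new ArrayList<>();
        requests.add(new IndexReq());
        requests.add(new DeleteReq());
        for (Req<?> r : requests) {
            System.out.println(r.getClass().getSimpleName());
        }
    }
}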
@@ -52,10 +52,6 @@ public class PercolateShardRequest extends BroadcastShardRequest {
         this.startTime = request.startTime;
     }

-    public PercolateShardRequest(ShardId shardId, OriginalIndices originalIndices) {
-        super(shardId, originalIndices);
-    }
-
     PercolateShardRequest(ShardId shardId, PercolateRequest request) {
         super(shardId, request);
         this.documentType = request.documentType();
@@ -23,7 +23,6 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.IndicesRequest;
-import org.elasticsearch.action.OriginalIndices;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.TransportActions;
 import org.elasticsearch.action.support.single.shard.SingleShardRequest;
@@ -109,7 +108,7 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
     }


-    public static class Request extends SingleShardRequest implements IndicesRequest {
+    public static class Request extends SingleShardRequest<Request> implements IndicesRequest {

         private int shardId;
         private String preference;
@@ -160,12 +159,8 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
             items = new ArrayList<>(size);
             for (int i = 0; i < size; i++) {
                 int slot = in.readVInt();
-                OriginalIndices originalIndices = OriginalIndices.readOriginalIndices(in);
-                PercolateShardRequest shardRequest = new PercolateShardRequest(new ShardId(index, shardId), originalIndices);
-                shardRequest.documentType(in.readString());
-                shardRequest.source(in.readBytesReference());
-                shardRequest.docSource(in.readBytesReference());
-                shardRequest.onlyCount(in.readBoolean());
+                PercolateShardRequest shardRequest = new PercolateShardRequest();
+                shardRequest.readFrom(in);
                 Item item = new Item(slot, shardRequest);
                 items.add(item);
             }
@@ -179,11 +174,7 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
             out.writeVInt(items.size());
             for (Item item : items) {
                 out.writeVInt(item.slot);
-                OriginalIndices.writeOriginalIndices(item.request.originalIndices(), out);
-                out.writeString(item.request.documentType());
-                out.writeBytesReference(item.request.source());
-                out.writeBytesReference(item.request.docSource());
-                out.writeBoolean(item.request.onlyCount());
+                item.request.writeTo(out);
             }
         }

@@ -245,7 +236,7 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
                     shardResponse.readFrom(in);
                     items.add(new Item(slot, shardResponse));
                 } else {
-                    items.add(new Item(slot, (Throwable)in.readThrowable()));
+                    items.add(new Item(slot, in.readThrowable()));
                 }
             }
         }
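Rather than hand-copying each field of the inner request on the wire, the multi-percolate transport now delegates to the request's own readFrom/writeTo, so a new field only has to be serialized in one place. A minimal sketch of that delegation pattern, with JDK DataInput/DataOutput standing in for Elasticsearch's stream classes:

import java.io.*;

class DelegatedSerialization {
    static class ShardRequest {
        String documentType = "";
        boolean onlyCount;

        // The request owns its wire format...
        void writeTo(DataOutput out) throws IOException {
            out.writeUTF(documentType);
            out.writeBoolean(onlyCount);
        }

        // ...and its own deserialization, so containers never duplicate it.
        void readFrom(DataInput in) throws IOException {
            documentType = in.readUTF();
            onlyCount = in.readBoolean();
        }
    }

    public static void main(String[] args) throws IOException {
        ShardRequest original = new ShardRequest();
        original.documentType = "doc";
        original.onlyCount = true;

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.writeTo(new DataOutputStream(bytes)); // container just delegates

        ShardRequest copy = new ShardRequest();
        copy.readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.documentType + " " + copy.onlyCount); // doc true
    }
}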
@@ -421,7 +421,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
      * @return this for chaining
      */
     public SearchRequestBuilder addRescorer(RescoreBuilder.Rescorer rescorer) {
-        sourceBuilder().addRescorer(new RescoreBuilder().rescorer(rescorer));
+        sourceBuilder().addRescorer(new RescoreBuilder(rescorer));
         return this;
     }

@@ -433,7 +433,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
      * @return this for chaining
      */
     public SearchRequestBuilder addRescorer(RescoreBuilder.Rescorer rescorer, int window) {
-        sourceBuilder().addRescorer(new RescoreBuilder().rescorer(rescorer).windowSize(window));
+        sourceBuilder().addRescorer(new RescoreBuilder(rescorer).windowSize(window));
         return this;
     }

@@ -40,13 +40,13 @@ public interface ActionFilter {
      * Enables filtering the execution of an action on the request side, either by sending a response through the
      * {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain}
      */
-    void apply(Task task, String action, ActionRequest request, ActionListener listener, ActionFilterChain chain);
+    void apply(Task task, String action, ActionRequest<?> request, ActionListener<?> listener, ActionFilterChain chain);

     /**
      * Enables filtering the execution of an action on the response side, either by sending a response through the
      * {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain}
      */
-    void apply(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain);
+    void apply(String action, ActionResponse response, ActionListener<?> listener, ActionFilterChain chain);

     /**
      * A simple base class for injectable action filters that spares the implementation from handling the
@@ -60,7 +60,7 @@ public interface ActionFilter {
         }

         @Override
-        public final void apply(Task task, String action, ActionRequest request, ActionListener listener, ActionFilterChain chain) {
+        public final void apply(Task task, String action, ActionRequest<?> request, ActionListener<?> listener, ActionFilterChain chain) {
             if (apply(action, request, listener)) {
                 chain.proceed(task, action, request, listener);
             }
@@ -70,10 +70,10 @@ public interface ActionFilter {
          * Applies this filter and returns {@code true} if the execution chain should proceed, or {@code false}
          * if it should be aborted since the filter already handled the request and called the given listener.
          */
-        protected abstract boolean apply(String action, ActionRequest request, ActionListener listener);
+        protected abstract boolean apply(String action, ActionRequest<?> request, ActionListener<?> listener);

         @Override
-        public final void apply(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain) {
+        public final void apply(String action, ActionResponse response, ActionListener<?> listener, ActionFilterChain chain) {
             if (apply(action, response, listener)) {
                 chain.proceed(action, response, listener);
             }
@@ -83,6 +83,6 @@ public interface ActionFilter {
          * Applies this filter and returns {@code true} if the execution chain should proceed, or {@code false}
          * if it should be aborted since the filter already handled the response by calling the given listener.
          */
-        protected abstract boolean apply(String action, ActionResponse response, ActionListener listener);
+        protected abstract boolean apply(String action, ActionResponse response, ActionListener<?> listener);
     }
 }
@@ -44,7 +44,7 @@ public final class AutoCreateIndex {
     @Inject
     public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) {
         this.resolver = resolver;
-        dynamicMappingDisabled = !settings.getAsBoolean(MapperService.INDEX_MAPPER_DYNAMIC_SETTING, MapperService.INDEX_MAPPER_DYNAMIC_DEFAULT);
+        dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings);
         String value = settings.get("action.auto_create_index");
         if (value == null || Booleans.isExplicitTrue(value)) {
             needToCheck = true;
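AutoCreateIndex moves from an ad-hoc settings.getAsBoolean(key, default) lookup to a typed setting object, which bundles the key, the default, and the parser together. A simplified sketch of the idea behind such a typed setting (this is not the Elasticsearch Setting class):

import java.util.Map;
import java.util.function.Function;

class TypedSetting<T> {
    private final String key;
    private final T defaultValue;
    private final Function<String, T> parser;

    TypedSetting(String key, T defaultValue, Function<String, T> parser) {
        this.key = key;
        this.defaultValue = defaultValue;
        this.parser = parser;
    }

    // One place that knows the key, the default, and how to parse the raw string.
    T get(Map<String, String> settings) {
        String raw = settings.get(key);
        return raw == null ? defaultValue : parser.apply(raw);
    }

    public static void main(String[] args) {
        TypedSetting<Boolean> mapperDynamic =
                new TypedSetting<>("index.mapper.dynamic", true, Boolean::parseBoolean);
        System.out.println(mapperDynamic.get(Map.of()));                                    // true (default)
        System.out.println(mapperDynamic.get(Map.of("index.mapper.dynamic", "false")));     // false
    }
}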
@@ -34,8 +34,8 @@ import java.util.function.Supplier;
 /**
  * A TransportAction that self registers a handler into the transport service
  */
-public abstract class HandledTransportAction<Request extends ActionRequest, Response extends ActionResponse> extends TransportAction<Request,Response>{
-
+public abstract class HandledTransportAction<Request extends ActionRequest<Request>, Response extends ActionResponse>
+        extends TransportAction<Request, Response> {
     protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
         super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
         transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler());
@@ -40,7 +40,7 @@ import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
 /**
  *
  */
-public abstract class TransportAction<Request extends ActionRequest, Response extends ActionResponse> extends AbstractComponent {
+public abstract class TransportAction<Request extends ActionRequest<Request>, Response extends ActionResponse> extends AbstractComponent {

     protected final ThreadPool threadPool;
     protected final String actionName;
@@ -66,7 +66,7 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
         return future;
     }

-    public final void execute(Request request, ActionListener<Response> listener) {
+    public final Task execute(Request request, ActionListener<Response> listener) {
         Task task = taskManager.register("transport", actionName, request);
         if (task == null) {
             execute(null, request, listener);
@@ -85,6 +85,7 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
                 }
             });
         }
+        return task;
     }

     private final void execute(Task task, Request request, ActionListener<Response> listener) {
@@ -114,7 +115,7 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex

     protected abstract void doExecute(Request request, ActionListener<Response> listener);

-    private static class RequestFilterChain<Request extends ActionRequest, Response extends ActionResponse> implements ActionFilterChain {
+    private static class RequestFilterChain<Request extends ActionRequest<Request>, Response extends ActionResponse> implements ActionFilterChain {

         private final TransportAction<Request, Response> action;
         private final AtomicInteger index = new AtomicInteger();
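execute now returns the Task it registers instead of void, giving callers a handle to the in-flight action. A hedged sketch of the calling pattern this enables, with illustrative types only (not the Elasticsearch TaskManager API):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;

class TaskReturningExecute {
    record Task(long id, String action) { }

    static final AtomicLong TASK_IDS = new AtomicLong();

    // Register a task, kick off the work, and hand the task back to the caller.
    static Task execute(String action, Runnable work, CompletableFuture<Void> listener) {
        Task task = new Task(TASK_IDS.incrementAndGet(), action);
        CompletableFuture.runAsync(work).whenComplete((v, t) -> {
            if (t == null) listener.complete(null); else listener.completeExceptionally(t);
        });
        return task; // caller can now track the in-flight action by id
    }

    public static void main(String[] args) {
        CompletableFuture<Void> listener = new CompletableFuture<>();
        Task task = execute("indices:refresh", () -> { }, listener);
        System.out.println("started task " + task.id() + " for " + task.action());
        listener.join();
    }
}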
@@ -31,7 +31,7 @@ import java.io.IOException;
 /**
  *
  */
-public class BroadcastRequest<T extends BroadcastRequest> extends ActionRequest<T> implements IndicesRequest.Replaceable {
+public class BroadcastRequest<Request extends BroadcastRequest<Request>> extends ActionRequest<Request> implements IndicesRequest.Replaceable {

     protected String[] indices;
     private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed();
@@ -50,9 +50,9 @@ public class BroadcastRequest<T extends BroadcastRequest> extends ActionRequest<

     @SuppressWarnings("unchecked")
     @Override
-    public final T indices(String... indices) {
+    public final Request indices(String... indices) {
         this.indices = indices;
-        return (T) this;
+        return (Request) this;
     }

     @Override
@@ -66,9 +66,9 @@ public class BroadcastRequest<T extends BroadcastRequest> extends ActionRequest<
     }

     @SuppressWarnings("unchecked")
-    public final T indicesOptions(IndicesOptions indicesOptions) {
+    public final Request indicesOptions(IndicesOptions indicesOptions) {
         this.indicesOptions = indicesOptions;
-        return (T) this;
+        return (Request) this;
     }

     @Override
@@ -49,7 +49,7 @@ import java.util.function.Supplier;
 /**
  *
  */
-public abstract class TransportBroadcastAction<Request extends BroadcastRequest, Response extends BroadcastResponse, ShardRequest extends BroadcastShardRequest, ShardResponse extends BroadcastShardResponse>
+public abstract class TransportBroadcastAction<Request extends BroadcastRequest<Request>, Response extends BroadcastResponse, ShardRequest extends BroadcastShardRequest, ShardResponse extends BroadcastShardResponse>
         extends HandledTransportAction<Request, Response> {

     protected final ClusterService clusterService;
@@ -74,7 +74,7 @@ import java.util.function.Supplier;
  * @param <Response> the response to the client request
  * @param <ShardOperationResult> per-shard operation results
  */
-public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRequest,
+public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRequest<Request>,
         Response extends BroadcastResponse,
         ShardOperationResult extends Streamable> extends HandledTransportAction<Request, Response> {

@@ -446,10 +446,12 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
             return nodeId;
         }

+        @Override
         public String[] indices() {
             return indicesLevelRequest.indices();
         }

+        @Override
         public IndicesOptions indicesOptions() {
             return indicesLevelRequest.indicesOptions();
         }
@@ -33,7 +33,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
  * Abstract class that allows to mark action requests that support acknowledgements.
  * Facilitates consistency across different api.
  */
-public abstract class AcknowledgedRequest<T extends MasterNodeRequest> extends MasterNodeRequest<T> implements AckedRequest {
+public abstract class AcknowledgedRequest<Request extends MasterNodeRequest<Request>> extends MasterNodeRequest<Request> implements AckedRequest {

     public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30);

@@ -48,9 +48,9 @@ public abstract class AcknowledgedRequest<T extends MasterNodeRequest> extends M
      * @return the request itself
      */
     @SuppressWarnings("unchecked")
-    public final T timeout(String timeout) {
+    public final Request timeout(String timeout) {
         this.timeout = TimeValue.parseTimeValue(timeout, this.timeout, getClass().getSimpleName() + ".timeout");
-        return (T)this;
+        return (Request)this;
     }

     /**
@@ -59,9 +59,9 @@ public abstract class AcknowledgedRequest<T extends MasterNodeRequest> extends M
      * @return the request itself
      */
     @SuppressWarnings("unchecked")
-    public final T timeout(TimeValue timeout) {
+    public final Request timeout(TimeValue timeout) {
         this.timeout = timeout;
-        return (T) this;
+        return (Request) this;
     }

     /**
@@ -27,14 +27,14 @@ import java.io.IOException;
 /**
  * Base request for master based read operations that allows to read the cluster state from the local node if needed
  */
-public abstract class MasterNodeReadRequest<T extends MasterNodeReadRequest> extends MasterNodeRequest<T> {
+public abstract class MasterNodeReadRequest<Request extends MasterNodeReadRequest<Request>> extends MasterNodeRequest<Request> {

     protected boolean local = false;

     @SuppressWarnings("unchecked")
-    public final T local(boolean local) {
+    public final Request local(boolean local) {
         this.local = local;
-        return (T) this;
+        return (Request) this;
     }

     public final boolean local() {
@@ -29,7 +29,7 @@ import java.io.IOException;
 /**
  * A based request for master based operation.
  */
-public abstract class MasterNodeRequest<T extends MasterNodeRequest> extends ActionRequest<T> {
+public abstract class MasterNodeRequest<Request extends MasterNodeRequest<Request>> extends ActionRequest<Request> {

     public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30);

@@ -42,15 +42,15 @@ public abstract class MasterNodeRequest<T extends MasterNodeRequest> extends Act
      * A timeout value in case the master has not been discovered yet or disconnected.
      */
     @SuppressWarnings("unchecked")
-    public final T masterNodeTimeout(TimeValue timeout) {
+    public final Request masterNodeTimeout(TimeValue timeout) {
         this.masterNodeTimeout = timeout;
-        return (T) this;
+        return (Request) this;
     }

     /**
      * A timeout value in case the master has not been discovered yet or disconnected.
      */
-    public final T masterNodeTimeout(String timeout) {
+    public final Request masterNodeTimeout(String timeout) {
         return masterNodeTimeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".masterNodeTimeout"));
     }

@@ -50,7 +50,7 @@ import java.util.function.Supplier;
 /**
  * A base class for operations that needs to be performed on the master node.
  */
-public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
+public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest<Request>, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
     protected final TransportService transportService;
     protected final ClusterService clusterService;

@@ -33,7 +33,8 @@ import java.util.function.Supplier;
 * A base class for read operations that needs to be performed on the master node.
 * Can also be executed on the local node if needed.
 */
-public abstract class TransportMasterNodeReadAction<Request extends MasterNodeReadRequest, Response extends ActionResponse> extends TransportMasterNodeAction<Request, Response> {
+public abstract class TransportMasterNodeReadAction<Request extends MasterNodeReadRequest<Request>, Response extends ActionResponse>
+        extends TransportMasterNodeAction<Request, Response> {

     public static final String FORCE_LOCAL_SETTING = "action.master.force_local";

@@ -30,30 +30,30 @@ import java.io.IOException;

 /**
 */
-public abstract class ClusterInfoRequest<T extends ClusterInfoRequest> extends MasterNodeReadRequest<T> implements IndicesRequest.Replaceable {
+public abstract class ClusterInfoRequest<Request extends ClusterInfoRequest<Request>> extends MasterNodeReadRequest<Request> implements IndicesRequest.Replaceable {

     private String[] indices = Strings.EMPTY_ARRAY;
     private String[] types = Strings.EMPTY_ARRAY;

     private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();

-    @SuppressWarnings("unchecked")
     @Override
-    public T indices(String... indices) {
+    @SuppressWarnings("unchecked")
+    public Request indices(String... indices) {
         this.indices = indices;
-        return (T) this;
+        return (Request) this;
     }

     @SuppressWarnings("unchecked")
-    public T types(String... types) {
+    public Request types(String... types) {
         this.types = types;
-        return (T) this;
+        return (Request) this;
     }

     @SuppressWarnings("unchecked")
-    public T indicesOptions(IndicesOptions indicesOptions) {
+    public Request indicesOptions(IndicesOptions indicesOptions) {
         this.indicesOptions = indicesOptions;
-        return (T) this;
+        return (Request) this;
     }

     @Override
@@ -33,7 +33,8 @@ import java.util.function.Supplier;

 /**
 */
-public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequest, Response extends ActionResponse> extends TransportMasterNodeReadAction<Request, Response> {
+public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequest<Request>, Response extends ActionResponse>
+        extends TransportMasterNodeReadAction<Request, Response> {

     public TransportClusterInfoAction(Settings settings, String actionName, TransportService transportService,
                                       ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters,
@@ -31,7 +31,7 @@ import java.io.IOException;
 /**
  *
  */
-public abstract class BaseNodesRequest<T extends BaseNodesRequest> extends ActionRequest<T> {
+public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request>> extends ActionRequest<Request> {

     public static String[] ALL_NODES = Strings.EMPTY_ARRAY;

@@ -52,9 +52,9 @@ public abstract class BaseNodesRequest<T extends BaseNodesRequest> extends Actio
     }

     @SuppressWarnings("unchecked")
-    public final T nodesIds(String... nodesIds) {
+    public final Request nodesIds(String... nodesIds) {
         this.nodesIds = nodesIds;
-        return (T) this;
+        return (Request) this;
     }

     public TimeValue timeout() {
@@ -62,15 +62,15 @@ public abstract class BaseNodesRequest<T extends BaseNodesRequest> extends Actio
     }

     @SuppressWarnings("unchecked")
-    public final T timeout(TimeValue timeout) {
+    public final Request timeout(TimeValue timeout) {
         this.timeout = timeout;
-        return (T) this;
+        return (Request) this;
     }

     @SuppressWarnings("unchecked")
-    public final T timeout(String timeout) {
+    public final Request timeout(String timeout) {
         this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout");
-        return (T) this;
+        return (Request) this;
     }

     @Override
@@ -50,7 +50,7 @@ import java.util.function.Supplier;
 /**
  *
  */
-public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest, NodesResponse extends BaseNodesResponse, NodeRequest extends BaseNodeRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction<NodesRequest, NodesResponse> {
+public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest<NodesRequest>, NodesResponse extends BaseNodesResponse, NodeRequest extends BaseNodeRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction<NodesRequest, NodesResponse> {

     protected final ClusterName clusterName;
     protected final ClusterService clusterService;
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * A replication request that has no more information than ReplicationRequest.
+ * Unfortunately ReplicationRequest can't be declared as a type parameter
+ * because it has a self referential type parameter of its own. So use this
+ * instead.
+ */
+public class BasicReplicationRequest extends ReplicationRequest<BasicReplicationRequest> {
+    public BasicReplicationRequest() {
+    }
+
+    /**
+     * Creates a new request with resolved shard id
+     */
+    public BasicReplicationRequest(ShardId shardId) {
+        super(shardId);
+    }
+
+    /**
+     * Copy constructor that creates a new request that is a copy of the one
+     * provided as an argument.
+     */
+    protected BasicReplicationRequest(BasicReplicationRequest request) {
+        super(request);
+    }
+
+}
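The javadoc of the new file states the motivation: a type with a self-referential parameter like ReplicationRequest<Request extends ReplicationRequest<Request>> cannot be used as a plain type argument or instantiated directly, so a trivial concrete leaf is required. The same shape in miniature (illustrative names, not the Elasticsearch classes):

class SelfReferentialLeaf {
    // A self-referential base type cannot be instantiated directly:
    // "new Base<???>()" has nothing valid to bind B to.
    static abstract class Base<B extends Base<B>> {
        @SuppressWarnings("unchecked")
        final B self() {
            return (B) this;
        }
    }

    // The trivial concrete leaf closes the recursion, exactly like BasicReplicationRequest.
    static final class BasicBase extends Base<BasicBase> { }

    public static void main(String[] args) {
        BasicBase b = new BasicBase().self();
        System.out.println(b.getClass().getSimpleName()); // BasicBase
    }
}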
@@ -38,7 +38,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 /**
  *
  */
-public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequest<T> implements IndicesRequest {
+public abstract class ReplicationRequest<Request extends ReplicationRequest<Request>> extends ActionRequest<Request> implements IndicesRequest {

     public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);

@@ -71,7 +71,7 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
      * Copy constructor that creates a new request that is a copy of the one provided as an argument.
      * The new request will inherit though headers and context from the original request that caused it.
      */
-    protected ReplicationRequest(T request) {
+    protected ReplicationRequest(Request request) {
         this.timeout = request.timeout();
         this.index = request.index();
         this.consistencyLevel = request.consistencyLevel();
@@ -81,15 +81,15 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
      * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
      */
     @SuppressWarnings("unchecked")
-    public final T timeout(TimeValue timeout) {
+    public final Request timeout(TimeValue timeout) {
         this.timeout = timeout;
-        return (T) this;
+        return (Request) this;
     }

     /**
      * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
      */
-    public final T timeout(String timeout) {
+    public final Request timeout(String timeout) {
         return timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout"));
     }

@@ -102,9 +102,9 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
     }

     @SuppressWarnings("unchecked")
-    public final T index(String index) {
+    public final Request index(String index) {
         this.index = index;
-        return (T) this;
+        return (Request) this;
     }

     @Override
@@ -135,9 +135,9 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
      * Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}
      */
     @SuppressWarnings("unchecked")
-    public final T consistencyLevel(WriteConsistencyLevel consistencyLevel) {
+    public final Request consistencyLevel(WriteConsistencyLevel consistencyLevel) {
         this.consistencyLevel = consistencyLevel;
-        return (T) this;
+        return (Request) this;
     }

     @Override
@@ -180,9 +180,10 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
      * Sets the target shard id for the request. The shard id is set when a
      * index/delete request is resolved by the transport action
      */
-    public T setShardId(ShardId shardId) {
+    @SuppressWarnings("unchecked")
+    public Request setShardId(ShardId shardId) {
         this.shardId = shardId;
-        return (T) this;
+        return (Request) this;
     }

     @Override
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.support.replication;

 import com.carrotsearch.hppc.cursors.IntObjectCursor;
+
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ReplicationResponse;
@@ -52,7 +53,8 @@ import java.util.function.Supplier;
 * Base class for requests that should be executed on all shards of an index or several indices.
 * This action sends shard requests to all primary shards of the indices and they are then replicated like write requests
 */
-public abstract class TransportBroadcastReplicationAction<Request extends BroadcastRequest, Response extends BroadcastResponse, ShardRequest extends ReplicationRequest, ShardResponse extends ReplicationResponse> extends HandledTransportAction<Request, Response> {
+public abstract class TransportBroadcastReplicationAction<Request extends BroadcastRequest<Request>, Response extends BroadcastResponse, ShardRequest extends ReplicationRequest<ShardRequest>, ShardResponse extends ReplicationResponse>
+        extends HandledTransportAction<Request, Response> {

     private final TransportReplicationAction replicatedBroadcastShardAction;
     private final ClusterService clusterService;
@@ -65,7 +65,6 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.BaseTransportResponseHandler;
 import org.elasticsearch.transport.ConnectTransportException;
 import org.elasticsearch.transport.EmptyTransportResponseHandler;
-import org.elasticsearch.transport.ReceiveTimeoutTransportException;
 import org.elasticsearch.transport.TransportChannel;
 import org.elasticsearch.transport.TransportChannelResponseHandler;
 import org.elasticsearch.transport.TransportException;
@@ -77,6 +76,7 @@ import org.elasticsearch.transport.TransportService;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -91,9 +91,7 @@ import java.util.function.Supplier;
 * primary node to validate request before primary operation followed by sampling state again for resolving
 * nodes with replica copies to perform replication.
 */
-public abstract class TransportReplicationAction<Request extends ReplicationRequest, ReplicaRequest extends ReplicationRequest, Response extends ReplicationResponse> extends TransportAction<Request, Response> {
-
-    public static final String SHARD_FAILURE_TIMEOUT = "action.support.replication.shard.failure_timeout";
+public abstract class TransportReplicationAction<Request extends ReplicationRequest<Request>, ReplicaRequest extends ReplicationRequest<ReplicaRequest>, Response extends ReplicationResponse> extends TransportAction<Request, Response> {

     protected final TransportService transportService;
     protected final ClusterService clusterService;
@@ -102,7 +100,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
     protected final WriteConsistencyLevel defaultWriteConsistencyLevel;
     protected final TransportRequestOptions transportOptions;
     protected final MappingUpdatedAction mappingUpdatedAction;
-    private final TimeValue shardFailedTimeout;

     final String transportReplicaAction;
     final String transportPrimaryAction;
@@ -134,8 +131,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         this.transportOptions = transportOptions();

         this.defaultWriteConsistencyLevel = WriteConsistencyLevel.fromString(settings.get("action.write_consistency", "quorum"));
-        // TODO: set a default timeout
-        shardFailedTimeout = settings.getAsTime(SHARD_FAILURE_TIMEOUT, null);
     }

     @Override
@@ -616,7 +611,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 if (logger.isTraceEnabled()) {
                     logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
                 }
-                replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference, shardFailedTimeout);
+                replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
             } catch (Throwable e) {
                 if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
                     if (logger.isTraceEnabled()) {
@@ -740,15 +735,13 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         private final AtomicInteger pending;
         private final int totalShards;
         private final Releasable indexShardReference;
-        private final TimeValue shardFailedTimeout;

         public ReplicationPhase(ReplicaRequest replicaRequest, Response finalResponse, ShardId shardId,
-                                TransportChannel channel, Releasable indexShardReference, TimeValue shardFailedTimeout) {
+                                TransportChannel channel, Releasable indexShardReference) {
             this.replicaRequest = replicaRequest;
             this.channel = channel;
             this.finalResponse = finalResponse;
             this.indexShardReference = indexShardReference;
-            this.shardFailedTimeout = shardFailedTimeout;
             this.shardId = shardId;

             // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics.
@@ -890,15 +883,32 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                         if (ignoreReplicaException(exp)) {
                             onReplicaFailure(nodeId, exp);
                         } else {
-                            logger.warn("{} failed to perform {} on node {}", exp, shardId, transportReplicaAction, node);
-                            shardStateAction.shardFailed(clusterService.state(), shard, indexUUID, "failed to perform " + transportReplicaAction + " on replica on node " + node, exp, shardFailedTimeout, new ReplicationFailedShardStateListener(nodeId, exp));
+                            String message = String.format(Locale.ROOT, "failed to perform %s on replica on node %s", transportReplicaAction, node);
+                            logger.warn("{} {}", exp, shardId, message);
+                            shardStateAction.shardFailed(
+                                shard,
+                                indexUUID,
+                                message,
+                                exp,
+                                new ShardStateAction.Listener() {
+                                    @Override
+                                    public void onSuccess() {
+                                        onReplicaFailure(nodeId, exp);
+                                    }
+
+                                    @Override
+                                    public void onFailure(Throwable t) {
+                                        // TODO: handle catastrophic non-channel failures
+                                        onReplicaFailure(nodeId, exp);
+                                    }
+                                }
+                            );
                         }
                     }
                 }
             );
         }


        void onReplicaFailure(String nodeId, @Nullable Throwable e) {
            // Only version conflict should be ignored from being put into the _shards header?
            if (e != null && ignoreReplicaException(e) == false) {
@@ -963,34 +973,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 }
             }
         }
-
-        public class ReplicationFailedShardStateListener implements ShardStateAction.Listener {
-            private final String nodeId;
-            private Throwable failure;
-
-            public ReplicationFailedShardStateListener(String nodeId, Throwable failure) {
-                this.nodeId = nodeId;
-                this.failure = failure;
-            }
-
-            @Override
-            public void onSuccess() {
-                onReplicaFailure(nodeId, failure);
-            }
-
-            @Override
-            public void onShardFailedNoMaster() {
-                onReplicaFailure(nodeId, failure);
-            }
-
-            @Override
-            public void onShardFailedFailure(DiscoveryNode master, TransportException e) {
-                if (e instanceof ReceiveTimeoutTransportException) {
-                    logger.trace("timeout sending shard failure to master [{}]", e, master);
-                }
-                onReplicaFailure(nodeId, failure);
-            }
-        }
     }

     /**
@@ -34,7 +34,8 @@ import java.util.concurrent.TimeUnit;
/**
*
*/
public abstract class InstanceShardOperationRequest<T extends InstanceShardOperationRequest> extends ActionRequest<T> implements IndicesRequest {
public abstract class InstanceShardOperationRequest<Request extends InstanceShardOperationRequest<Request>> extends ActionRequest<Request>
implements IndicesRequest {

public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);

@@ -77,9 +78,9 @@ public abstract class InstanceShardOperationRequest<T extends InstanceShardOpera
}

@SuppressWarnings("unchecked")
public final T index(String index) {
public final Request index(String index) {
this.index = index;
return (T) this;
return (Request) this;
}

public TimeValue timeout() {

@@ -90,15 +91,15 @@ public abstract class InstanceShardOperationRequest<T extends InstanceShardOpera
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
@SuppressWarnings("unchecked")
public final T timeout(TimeValue timeout) {
public final Request timeout(TimeValue timeout) {
this.timeout = timeout;
return (T) this;
return (Request) this;
}

/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
public final T timeout(String timeout) {
public final Request timeout(String timeout) {
return timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout"));
}

@@ -54,8 +54,8 @@ import java.util.function.Supplier;
/**
*
*/
public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {

public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest<Request>, Response extends ActionResponse>
extends HandledTransportAction<Request, Response> {
protected final ClusterService clusterService;
protected final TransportService transportService;

@@ -34,7 +34,7 @@ import java.io.IOException;
/**
*
*/
public abstract class SingleShardRequest<T extends SingleShardRequest> extends ActionRequest<T> implements IndicesRequest {
public abstract class SingleShardRequest<Request extends SingleShardRequest<Request>> extends ActionRequest<Request> implements IndicesRequest {

public static final IndicesOptions INDICES_OPTIONS = IndicesOptions.strictSingleIndexNoExpandForbidClosed();

@@ -52,7 +52,7 @@ public abstract class SingleShardRequest<T extends SingleShardRequest> extends A
public SingleShardRequest() {
}

public SingleShardRequest(String index) {
protected SingleShardRequest(String index) {
this.index = index;
}

@@ -82,9 +82,9 @@ public abstract class SingleShardRequest<T extends SingleShardRequest> extends A
* Sets the index.
*/
@SuppressWarnings("unchecked")
public final T index(String index) {
public final Request index(String index) {
this.index = index;
return (T) this;
return (Request) this;
}

@Override

@@ -108,9 +108,9 @@ public abstract class SingleShardRequest<T extends SingleShardRequest> extends A
* Controls if the operation will be executed on a separate thread when executed locally.
*/
@SuppressWarnings("unchecked")
public final T operationThreaded(boolean threadedOperation) {
public final Request operationThreaded(boolean threadedOperation) {
this.threadedOperation = threadedOperation;
return (T) this;
return (Request) this;
}

@Override

@@ -54,7 +54,7 @@ import static org.elasticsearch.action.support.TransportActions.isShardNotAvaila
* the read operation can be performed on other shard copies. Concrete implementations can provide their own list
* of candidate shards to try the read operation on.
*/
public abstract class TransportSingleShardAction<Request extends SingleShardRequest, Response extends ActionResponse> extends TransportAction<Request, Response> {
public abstract class TransportSingleShardAction<Request extends SingleShardRequest<Request>, Response extends ActionResponse> extends TransportAction<Request, Response> {

protected final ClusterService clusterService;

@@ -34,7 +34,7 @@ import java.io.IOException;
/**
* A base class for task requests
*/
public class BaseTasksRequest<T extends BaseTasksRequest> extends ActionRequest<T> {
public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends ActionRequest<Request> {

public static final String[] ALL_ACTIONS = Strings.EMPTY_ARRAY;

@@ -73,9 +73,9 @@ public class BaseTasksRequest<T extends BaseTasksRequest> extends ActionRequest<
* Sets the list of action masks for the actions that should be returned
*/
@SuppressWarnings("unchecked")
public final T actions(String... actions) {
public final Request actions(String... actions) {
this.actions = actions;
return (T) this;
return (Request) this;
}

/**

@@ -90,9 +90,9 @@ public class BaseTasksRequest<T extends BaseTasksRequest> extends ActionRequest<
}

@SuppressWarnings("unchecked")
public final T nodesIds(String... nodesIds) {
public final Request nodesIds(String... nodesIds) {
this.nodesIds = nodesIds;
return (T) this;
return (Request) this;
}

/**

@@ -103,9 +103,9 @@ public class BaseTasksRequest<T extends BaseTasksRequest> extends ActionRequest<
}

@SuppressWarnings("unchecked")
public T parentNode(String parentNode) {
public Request parentNode(String parentNode) {
this.parentNode = parentNode;
return (T) this;
return (Request) this;
}

/**

@@ -116,9 +116,9 @@ public class BaseTasksRequest<T extends BaseTasksRequest> extends ActionRequest<
}

@SuppressWarnings("unchecked")
public T parentTaskId(long parentTaskId) {
public Request parentTaskId(long parentTaskId) {
this.parentTaskId = parentTaskId;
return (T) this;
return (Request) this;
}

@@ -127,15 +127,15 @@ public class BaseTasksRequest<T extends BaseTasksRequest> extends ActionRequest<
}

@SuppressWarnings("unchecked")
public final T timeout(TimeValue timeout) {
public final Request timeout(TimeValue timeout) {
this.timeout = timeout;
return (T) this;
return (Request) this;
}

@SuppressWarnings("unchecked")
public final T timeout(String timeout) {
public final Request timeout(String timeout) {
this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout");
return (T) this;
return (Request) this;
}

@Override
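With the Request self-type in place, chained calls on these setters keep the concrete request type, so subtype-specific methods stay reachable anywhere in the chain. A hypothetical usage sketch (MyTasksRequest is illustrative, not part of the diff):

    // Each setter now returns MyTasksRequest rather than a raw BaseTasksRequest.
    class MyTasksRequest extends BaseTasksRequest<MyTasksRequest> {
    }

    MyTasksRequest request = new MyTasksRequest()
            .actions("cluster:monitor/*")
            .nodesIds("node1", "node2")
            .timeout("10s");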
@@ -40,7 +40,8 @@ public interface ElasticsearchClient {
* @param <RequestBuilder> The request builder type.
* @return A future allowing to get back the response.
*/
<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(final Action<Request, Response, RequestBuilder> action, final Request request);
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
final Action<Request, Response, RequestBuilder> action, final Request request);

/**
* Executes a generic action, denoted by an {@link Action}.

@@ -52,7 +53,8 @@ public interface ElasticsearchClient {
* @param <Response> The response type.
* @param <RequestBuilder> The request builder type.
*/
<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);

/**
* Prepares a request builder to execute, specified by {@link Action}.

@@ -63,7 +65,8 @@ public interface ElasticsearchClient {
* @param <RequestBuilder> The request builder.
* @return The request builder, that can, at a later stage, execute the request.
*/
<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(final Action<Request, Response, RequestBuilder> action);
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
final Action<Request, Response, RequestBuilder> action);

/**
* Returns the threadpool used to execute requests on this client

@@ -52,7 +52,8 @@ public abstract class FilterClient extends AbstractClient {
}

@Override
protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
in().execute(action, request, listener);
}
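FilterClient funnels every request through this single doExecute override, which makes it the natural hook for cross-cutting concerns. A hedged sketch of a decorating subclass (LoggingFilterClient is hypothetical; the only contract it relies on is the override shown above):

    // Log each action before delegating to the wrapped client.
    class LoggingFilterClient extends FilterClient {
        LoggingFilterClient(Client in) {
            super(in);
        }

        @Override
        protected <Request extends ActionRequest<Request>, Response extends ActionResponse,
                RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
                Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
            logger.debug("executing action [{}]", action.name());
            in().execute(action, request, listener);
        }
    }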
@@ -55,7 +55,8 @@ public class NodeClient extends AbstractClient {

@SuppressWarnings("unchecked")
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
TransportAction<Request, Response> transportAction = actions.get(action);
if (transportAction == null) {
throw new IllegalStateException("failed to find action [" + action + "] to execute");

@@ -359,12 +359,14 @@ public abstract class AbstractClient extends AbstractComponent implements Client
}

@Override
public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(final Action<Request, Response, RequestBuilder> action) {
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
final Action<Request, Response, RequestBuilder> action) {
return action.newRequestBuilder(this);
}

@Override
public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(Action<Request, Response, RequestBuilder> action, Request request) {
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) {
PlainActionFuture<Response> actionFuture = PlainActionFuture.newFuture();
execute(action, request, actionFuture);
return actionFuture;

@@ -374,12 +376,13 @@ public abstract class AbstractClient extends AbstractComponent implements Client
* This is the single execution point of *all* clients.
*/
@Override
public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
listener = threadedWrapper.wrap(listener);
doExecute(action, request, listener);
}

protected abstract <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
protected abstract <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);

@Override
public ActionFuture<IndexResponse> index(final IndexRequest request) {

@@ -816,17 +819,20 @@ public abstract class AbstractClient extends AbstractComponent implements Client
}

@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(Action<Request, Response, RequestBuilder> action, Request request) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) {
return client.execute(action, request);
}

@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
client.execute(action, request, listener);
}

@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(Action<Request, Response, RequestBuilder> action) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
Action<Request, Response, RequestBuilder> action) {
return client.prepareExecute(action);
}

@@ -1173,17 +1179,20 @@ public abstract class AbstractClient extends AbstractComponent implements Client
}

@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(Action<Request, Response, RequestBuilder> action, Request request) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) {
return client.execute(action, request);
}

@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
client.execute(action, request, listener);
}

@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(Action<Request, Response, RequestBuilder> action) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
Action<Request, Response, RequestBuilder> action) {
return client.prepareExecute(action);
}

@@ -1672,7 +1681,7 @@ public abstract class AbstractClient extends AbstractComponent implements Client
public Client filterWithHeader(Map<String, String> headers) {
return new FilterClient(this) {
@Override
protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
ThreadContext threadContext = threadPool().getThreadContext();
try (ThreadContext.StoredContext ctx = threadContext.stashContext()) {
threadContext.putHeader(headers);
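The try-with-resources around stashContext() is what scopes the header overrides: the stored context is restored automatically when the block closes, so the extra headers never leak to later requests on the same thread. A minimal, self-contained analogue of that save/mutate/restore idiom (ScopedHeaders is hypothetical, not the Elasticsearch class):

    import java.util.HashMap;
    import java.util.Map;

    final class ScopedHeaders {
        private static final ThreadLocal<Map<String, String>> HEADERS =
                ThreadLocal.withInitial(HashMap::new);

        // Save the current per-thread state and restore it on close.
        static AutoCloseable stash() {
            final Map<String, String> saved = new HashMap<>(HEADERS.get());
            return () -> HEADERS.set(saved);
        }

        static void put(Map<String, String> headers) {
            HEADERS.get().putAll(headers);
        }
    }

    // Usage mirrors filterWithHeader: the headers apply only inside the block.
    // try (AutoCloseable ctx = ScopedHeaders.stash()) {
    //     ScopedHeaders.put(headers);
    //     // execute the request with the extra headers visible
    // } catch (Exception e) { /* handle */ }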
@@ -19,6 +19,10 @@

package org.elasticsearch.client.transport;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.Version;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;

@@ -35,6 +39,7 @@ import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;

@@ -53,10 +58,6 @@ import org.elasticsearch.threadpool.ThreadPoolModule;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.netty.NettyTransport;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.common.settings.Settings.settingsBuilder;

/**

@@ -80,7 +81,7 @@ public class TransportClient extends AbstractClient {
*/
public static class Builder {

private Settings settings = Settings.EMPTY;
private Settings providedSettings = Settings.EMPTY;
private List<Class<? extends Plugin>> pluginClasses = new ArrayList<>();

/**

@@ -94,7 +95,7 @@ public class TransportClient extends AbstractClient {
* The settings to configure the transport client with.
*/
public Builder settings(Settings settings) {
this.settings = settings;
this.providedSettings = settings;
return this;
}

@@ -106,27 +107,29 @@ public class TransportClient extends AbstractClient {
return this;
}

private PluginsService newPluginService(final Settings settings) {
final Settings.Builder settingsBuilder = settingsBuilder()
.put(NettyTransport.PING_SCHEDULE, "5s") // enable by default the transport schedule ping interval
.put(InternalSettingsPreparer.prepareSettings(settings))
.put("network.server", false)
.put("node.client", true)
.put(CLIENT_TYPE_SETTING, CLIENT_TYPE);
return new PluginsService(settingsBuilder.build(), null, null, pluginClasses);
}

/**
* Builds a new instance of the transport client.
*/
public TransportClient build() {
Settings settings = InternalSettingsPreparer.prepareSettings(this.settings);
settings = settingsBuilder()
.put(NettyTransport.PING_SCHEDULE, "5s") // enable by default the transport schedule ping interval
.put(settings)
.put("network.server", false)
.put("node.client", true)
.put(CLIENT_TYPE_SETTING, CLIENT_TYPE)
.build();

PluginsService pluginsService = new PluginsService(settings, null, null, pluginClasses);
this.settings = pluginsService.updatedSettings();
final PluginsService pluginsService = newPluginService(providedSettings);
final Settings settings = pluginsService.updatedSettings();

Version version = Version.CURRENT;

final ThreadPool threadPool = new ThreadPool(settings);
final NetworkService networkService = new NetworkService(settings);
final SettingsFilter settingsFilter = new SettingsFilter(settings);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
boolean success = false;
try {
ModulesBuilder modules = new ModulesBuilder();

@@ -136,18 +139,18 @@ public class TransportClient extends AbstractClient {
modules.add(pluginModule);
}
modules.add(new PluginsModule(pluginsService));
modules.add(new SettingsModule(this.settings, settingsFilter));
modules.add(new NetworkModule(networkService, this.settings, true));
modules.add(new ClusterNameModule(this.settings));
modules.add(new SettingsModule(settings, settingsFilter));
modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry));
modules.add(new ClusterNameModule(settings));
modules.add(new ThreadPoolModule(threadPool));
modules.add(new SearchModule() {
modules.add(new SearchModule(settings, namedWriteableRegistry) {
@Override
protected void configure() {
// noop
}
});
modules.add(new ActionModule(true));
modules.add(new CircuitBreakerModule(this.settings));
modules.add(new CircuitBreakerModule(settings));

pluginsService.processModules(modules);

@@ -275,7 +278,7 @@ public class TransportClient extends AbstractClient {
}

@Override
protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
proxy.execute(action, request, listener);
}
}
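After this refactor the Builder no longer mutates its own settings field: the prepared settings are derived once in newPluginService, and the plugin-updated settings feed every module. Construction from the caller's side is unchanged; a hedged usage sketch (the cluster name and address are placeholders, and addTransportAddress is assumed from the client API of this branch):

    Settings settings = Settings.settingsBuilder()
            .put("cluster.name", "my-cluster") // placeholder
            .build();

    TransportClient client = new TransportClient.Builder()
            .settings(settings)
            .build();

    // client.addTransportAddress(new InetSocketTransportAddress(
    //         InetAddress.getByName("localhost"), 9300));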
@@ -23,7 +23,6 @@ import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.IndexTemplateFilter;
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;

@@ -36,7 +35,6 @@ import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;

@@ -56,28 +54,12 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.cluster.settings.Validator;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.IndexingSlowLog;
import org.elasticsearch.index.search.stats.SearchSlowLog;
import org.elasticsearch.index.settings.IndexDynamicSettings;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.search.internal.DefaultSearchContext;

import java.util.Arrays;
import java.util.Collections;

@@ -108,7 +90,6 @@ public class ClusterModule extends AbstractModule {
SnapshotInProgressAllocationDecider.class));

private final Settings settings;
private final DynamicSettings.Builder indexDynamicSettings = new DynamicSettings.Builder();
private final ExtensionPoint.SelectedType<ShardsAllocator> shardsAllocators = new ExtensionPoint.SelectedType<>("shards_allocator", ShardsAllocator.class);
private final ExtensionPoint.ClassSet<AllocationDecider> allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class);
private final ExtensionPoint.ClassSet<IndexTemplateFilter> indexTemplateFilters = new ExtensionPoint.ClassSet<>("index_template_filter", IndexTemplateFilter.class);

@@ -118,9 +99,6 @@ public class ClusterModule extends AbstractModule {

public ClusterModule(Settings settings) {
this.settings = settings;

registerBuiltinIndexSettings();

for (Class<? extends AllocationDecider> decider : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
registerAllocationDecider(decider);
}

@@ -128,69 +106,6 @@ public class ClusterModule extends AbstractModule {
registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class);
}

private void registerBuiltinIndexSettings() {
registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE, Validator.EMPTY);
registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.NON_NEGATIVE_INTEGER);
registerIndexDynamicSetting(MergeSchedulerConfig.MAX_MERGE_COUNT, Validator.EMPTY);
registerIndexDynamicSetting(MergeSchedulerConfig.AUTO_THROTTLE, Validator.EMPTY);
registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY);
registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "*", Validator.EMPTY);
registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "*", Validator.EMPTY);
registerIndexDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY);
registerIndexDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, Validator.NON_NEGATIVE_INTEGER);
registerIndexDynamicSetting(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_READ_ONLY, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_BLOCKS_READ, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_BLOCKS_WRITE, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_BLOCKS_METADATA, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, Validator.EMPTY);
registerIndexDynamicSetting(IndexMetaData.SETTING_PRIORITY, Validator.NON_NEGATIVE_INTEGER);
registerIndexDynamicSetting(IndicesTTLService.INDEX_TTL_DISABLE_PURGE, Validator.EMPTY);
registerIndexDynamicSetting(IndexShard.INDEX_REFRESH_INTERVAL, Validator.TIME);
registerIndexDynamicSetting(PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS, Validator.EMPTY);
registerIndexDynamicSetting(EngineConfig.INDEX_GC_DELETES_SETTING, Validator.TIME);
registerIndexDynamicSetting(IndexShard.INDEX_FLUSH_ON_CLOSE, Validator.BOOLEAN);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, Validator.TIME);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, Validator.TIME);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, Validator.TIME);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, Validator.TIME);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT, Validator.EMPTY);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL, Validator.EMPTY);
registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG, Validator.EMPTY);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE, Validator.TIME);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_REFORMAT, Validator.EMPTY);
registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL, Validator.EMPTY);
registerIndexDynamicSetting(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, Validator.INTEGER);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, Validator.DOUBLE);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT, Validator.BYTES_SIZE);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, Validator.INTEGER_GTE_2);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT, Validator.INTEGER_GTE_2);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, Validator.BYTES_SIZE);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, Validator.DOUBLE_GTE_2);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, Validator.NON_NEGATIVE_DOUBLE);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_COMPOUND_FORMAT, Validator.EMPTY);
registerIndexDynamicSetting(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE);
registerIndexDynamicSetting(IndexSettings.INDEX_TRANSLOG_DURABILITY, Validator.EMPTY);
registerIndexDynamicSetting(IndicesWarmer.INDEX_WARMER_ENABLED, Validator.EMPTY);
registerIndexDynamicSetting(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, Validator.BOOLEAN);
registerIndexDynamicSetting(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, Validator.TIME);
registerIndexDynamicSetting(DefaultSearchContext.MAX_RESULT_WINDOW, Validator.POSITIVE_INTEGER);
}

public void registerIndexDynamicSetting(String setting, Validator validator) {
indexDynamicSettings.addSetting(setting, validator);
}

public void registerAllocationDecider(Class<? extends AllocationDecider> allocationDecider) {
allocationDeciders.registerExtension(allocationDecider);
}
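The long registerBuiltinIndexSettings block above is deleted because those index settings are moving to the new typed Setting infrastructure; the registration hooks themselves remain the extension point. A hedged sketch of how a plugin would have used it (the setting key is hypothetical, and the onModule hook is assumed from the plugin conventions of this branch):

    // Register a custom dynamic index setting with a validator, exactly as
    // the removed built-in block did.
    public void onModule(ClusterModule clusterModule) {
        clusterModule.registerIndexDynamicSetting("my_plugin.throttle.enabled", Validator.BOOLEAN);
    }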
@@ -205,8 +120,6 @@ public class ClusterModule extends AbstractModule {

@Override
protected void configure() {
bind(DynamicSettings.class).annotatedWith(IndexDynamicSettings.class).toInstance(indexDynamicSettings.build());

// bind ShardsAllocator
String shardsAllocatorType = shardsAllocators.bindType(binder(), settings, ClusterModule.SHARDS_ALLOCATOR_TYPE_KEY, ClusterModule.BALANCED_ALLOCATOR);
if (shardsAllocatorType.equals(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR)) {

@@ -406,10 +406,26 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
String nodeId = nodeStats.getNode().id();
String nodeName = nodeStats.getNode().getName();
if (logger.isTraceEnabled()) {
logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}", nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(), leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}",
nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(),
leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
}
if (leastAvailablePath.getTotal().bytes() < 0) {
if (logger.isTraceEnabled()) {
logger.trace("node: [{}] least available path has less than 0 total bytes of disk [{}], skipping",
nodeId, leastAvailablePath.getTotal().bytes());
}
} else {
newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
}
if (mostAvailablePath.getTotal().bytes() < 0) {
if (logger.isTraceEnabled()) {
logger.trace("node: [{}] most available path has less than 0 total bytes of disk [{}], skipping",
nodeId, mostAvailablePath.getTotal().bytes());
}
} else {
newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));
}
newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));

}
}
@@ -22,9 +22,11 @@ package org.elasticsearch.cluster.action.shard;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.MasterNodeChangePredicate;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -42,13 +44,16 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.NodeDisconnectedException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

@@ -60,55 +65,97 @@ import java.util.Locale;
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;

public class ShardStateAction extends AbstractComponent {

public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started";
public static final String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure";

private final TransportService transportService;
private final ClusterService clusterService;
private final ThreadPool threadPool;

@Inject
public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService,
AllocationService allocationService, RoutingService routingService) {
AllocationService allocationService, RoutingService routingService, ThreadPool threadPool) {
super(settings);
this.transportService = transportService;
this.clusterService = clusterService;
this.threadPool = threadPool;

transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger));
transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardFailedTransportHandler(clusterService, new ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger));
}

public void shardFailed(final ClusterState clusterState, final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
shardFailed(clusterState, shardRouting, indexUUID, message, failure, null, listener);
}

public void resendShardFailed(final ClusterState clusterState, final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
logger.trace("{} re-sending failed shard [{}], index UUID [{}], reason [{}]", shardRouting.shardId(), failure, shardRouting, indexUUID, message);
shardFailed(clusterState, shardRouting, indexUUID, message, failure, listener);
}

public void shardFailed(final ClusterState clusterState, final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, TimeValue timeout, Listener listener) {
DiscoveryNode masterNode = clusterState.nodes().masterNode();
private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardRoutingEntry shardRoutingEntry, final Listener listener) {
DiscoveryNode masterNode = observer.observedState().nodes().masterNode();
if (masterNode == null) {
logger.warn("{} no master known to fail shard [{}]", shardRouting.shardId(), shardRouting);
listener.onShardFailedNoMaster();
return;
}
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, failure);
TransportRequestOptions options = TransportRequestOptions.EMPTY;
if (timeout != null) {
options = TransportRequestOptions.builder().withTimeout(timeout).build();
}
transportService.sendRequest(masterNode,
SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
listener.onSuccess();
}
logger.warn("{} no master known for action [{}] for shard [{}]", shardRoutingEntry.getShardRouting().shardId(), actionName, shardRoutingEntry.getShardRouting());
waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener);
} else {
logger.debug("{} sending [{}] to [{}] for shard [{}]", shardRoutingEntry.getShardRouting().getId(), actionName, masterNode.getId(), shardRoutingEntry);
transportService.sendRequest(masterNode,
actionName, shardRoutingEntry, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
listener.onSuccess();
}

@Override
public void handleException(TransportException exp) {
logger.warn("{} unexpected failure while sending request to [{}] to fail shard [{}]", exp, shardRoutingEntry.shardRouting.shardId(), masterNode, shardRoutingEntry);
listener.onShardFailedFailure(masterNode, exp);
@Override
public void handleException(TransportException exp) {
if (isMasterChannelException(exp)) {
waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener);
} else {
logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard [{}]", exp, shardRoutingEntry.getShardRouting().shardId(), actionName, masterNode, shardRoutingEntry);
listener.onFailure(exp.getCause());
}
}
});
}
}

private static Class[] MASTER_CHANNEL_EXCEPTIONS = new Class[]{
NotMasterException.class,
ConnectTransportException.class,
Discovery.FailedToCommitClusterStateException.class
};

private static boolean isMasterChannelException(TransportException exp) {
return ExceptionsHelper.unwrap(exp, MASTER_CHANNEL_EXCEPTIONS) != null;
}

public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, failure);
sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardRoutingEntry, listener);
}

public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
logger.trace("{} re-sending failed shard [{}], index UUID [{}], reason [{}]", shardRouting.shardId(), failure, shardRouting, indexUUID, message);
shardFailed(shardRouting, indexUUID, message, failure, listener);
}

// visible for testing
protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardRoutingEntry shardRoutingEntry, Listener listener) {
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
if (logger.isTraceEnabled()) {
logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", shardRoutingEntry.getShardRouting().shardId(), state.prettyPrint(), shardRoutingEntry);
}
});
sendShardAction(actionName, observer, shardRoutingEntry, listener);
}

@Override
public void onClusterServiceClose() {
logger.warn("{} node closed while execution action [{}] for shard [{}]", shardRoutingEntry.failure, shardRoutingEntry.getShardRouting().getId(), actionName, shardRoutingEntry.getShardRouting());
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}

@Override
public void onTimeout(TimeValue timeout) {
// we wait indefinitely for a new master
assert false;
}
}, MasterNodeChangePredicate.INSTANCE);
}

private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {

@@ -208,21 +255,10 @@ public class ShardStateAction extends AbstractComponent {
}
}

public void shardStarted(final ClusterState clusterState, final ShardRouting shardRouting, String indexUUID, final String reason) {
DiscoveryNode masterNode = clusterState.nodes().masterNode();
if (masterNode == null) {
logger.warn("{} no master known to start shard [{}]", shardRouting.shardId(), shardRouting);
return;
}
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null);
logger.debug("sending start shard [{}]", shardRoutingEntry);
transportService.sendRequest(masterNode,
SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleException(TransportException exp) {
logger.warn("{} failure sending start shard [{}] to [{}]", exp, shardRouting.shardId(), masterNode, shardRouting);
}
});
public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String message, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, null);
sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardRoutingEntry, listener);
}

private static class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
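Callers now pass a Listener instead of relying on the old fire-and-forget timeout: channel failures (no master, disconnect, failed commit) are retried internally against the next elected master, and only non-channel failures reach the caller. An illustrative caller, in a hypothetical context where shardStateAction, shardRouting, indexUUID and cause are in scope:

    shardStateAction.shardFailed(shardRouting, indexUUID, "engine failure", cause,
        new ShardStateAction.Listener() {
            @Override
            public void onSuccess() {
                // the master acknowledged the shard-failed task
            }

            @Override
            public void onFailure(Throwable t) {
                // a non-channel failure; channel failures were already retried
                // internally against the next elected master
            }
        });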
@@ -334,10 +370,23 @@ public class ShardStateAction extends AbstractComponent {
default void onSuccess() {
}

default void onShardFailedNoMaster() {
}

default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {
/**
* Notification for non-channel exceptions that are not handled
* by {@link ShardStateAction}.
*
* The exceptions that are handled by {@link ShardStateAction}
* are:
* - {@link NotMasterException}
* - {@link NodeDisconnectedException}
* - {@link Discovery.FailedToCommitClusterStateException}
*
* Any other exception is communicated to the requester via
* this notification.
*
* @param t the unexpected cause of the failure on the master
*/
default void onFailure(final Throwable t) {
}
}

}

@@ -306,16 +306,16 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
addIndexBlock(indexMetaData.getIndex(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
}
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_READ_ONLY, false)) {
if (IndexMetaData.INDEX_READ_ONLY_SETTING.get(indexMetaData.getSettings())) {
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
}
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, false)) {
if (IndexMetaData.INDEX_BLOCKS_READ_SETTING.get(indexMetaData.getSettings())) {
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_READ_BLOCK);
}
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, false)) {
if (IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.get(indexMetaData.getSettings())) {
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK);
}
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, false)) {
if (IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.get(indexMetaData.getSettings())) {
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK);
}
return this;
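This hunk swaps stringly-typed getAsBoolean lookups for typed Setting constants, which centralize the key, the default, and the parsing in one place. A sketch of what such a constant looks like (the exact boolSetting signature is assumed from the Setting API of this branch):

    // Key, default, dynamic flag, scope, as in the AutoExpandReplicas SETTING below.
    public static final Setting<Boolean> INDEX_READ_ONLY_SETTING =
            Setting.boolSetting("index.blocks.read_only", false, true, Setting.Scope.INDEX);

    // Reading it replaces the stringly-typed lookup:
    //   settings.getAsBoolean("index.blocks.read_only", false)
    // with:
    //   INDEX_READ_ONLY_SETTING.get(settings)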

@ -0,0 +1,92 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.metadata;

import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.settings.Setting;

/**
* This class acts as a functional wrapper around the <tt>index.auto_expand_replicas</tt> setting.
* This setting, or rather its value, is expanded into a min and a max value, which requires special handling
* based on the number of data nodes in the cluster. This class handles all the parsing and streamlines the access to these values.
*/
final class AutoExpandReplicas {
// the value we recognize in the "max" position to mean all the nodes
private static final String ALL_NODES_VALUE = "all";
public static final Setting<AutoExpandReplicas> SETTING = new Setting<>(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "false", (value) -> {
final int min;
final int max;
if (Booleans.parseBoolean(value, true) == false) {
return new AutoExpandReplicas(0, 0, false);
}
final int dash = value.indexOf('-');
if (-1 == dash) {
throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash);
}
final String sMin = value.substring(0, dash);
try {
min = Integer.parseInt(sMin);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash, e);
}
String sMax = value.substring(dash + 1);
if (sMax.equals(ALL_NODES_VALUE)) {
max = Integer.MAX_VALUE;
} else {
try {
max = Integer.parseInt(sMax);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash, e);
}
}
return new AutoExpandReplicas(min, max, true);
}, true, Setting.Scope.INDEX);

private final int minReplicas;
private final int maxReplicas;
private final boolean enabled;

private AutoExpandReplicas(int minReplicas, int maxReplicas, boolean enabled) {
if (minReplicas > maxReplicas) {
throw new IllegalArgumentException("[" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] minReplicas must be <= maxReplicas but was " + minReplicas + " > " + maxReplicas);
}
this.minReplicas = minReplicas;
this.maxReplicas = maxReplicas;
this.enabled = enabled;
}

int getMinReplicas() {
return minReplicas;
}

int getMaxReplicas(int numDataNodes) {
return Math.min(maxReplicas, numDataNodes - 1);
}

@Override
public String toString() {
return enabled ? minReplicas + "-" + maxReplicas : "false";
}

boolean isEnabled() {
return enabled;
}
}
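
Since AutoExpandReplicas is package-private, here is a self-contained sketch of the same value grammar for experimentation. The helper name and the int[] return shape are inventions of this example, and it is simplified: only the literal "false" disables it here, whereas the class above accepts anything Booleans.parseBoolean treats as false:

// Standalone re-implementation of the grammar accepted above:
// "false" (disabled), or "<min>-<max>" where <max> may be the literal "all".
public class AutoExpandParseDemo {
    // returns null when auto-expansion is disabled, otherwise {min, max}
    static int[] parse(String value) {
        if (value.equalsIgnoreCase("false")) {
            return null; // disabled, mirrors new AutoExpandReplicas(0, 0, false)
        }
        int dash = value.indexOf('-');
        if (dash == -1) {
            throw new IllegalArgumentException("failed to parse [" + value + "]: expected <min>-<max>");
        }
        int min = Integer.parseInt(value.substring(0, dash));
        String sMax = value.substring(dash + 1);
        int max = sMax.equals("all") ? Integer.MAX_VALUE : Integer.parseInt(sMax);
        if (min > max) {
            throw new IllegalArgumentException("minReplicas must be <= maxReplicas");
        }
        return new int[]{min, max};
    }

    public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(parse("0-5")));   // [0, 5]
        System.out.println(java.util.Arrays.toString(parse("0-all"))); // [0, 2147483647]
        System.out.println(parse("false"));                            // null
    }
}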

@ -29,14 +29,17 @@ import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader;
import org.elasticsearch.common.xcontent.FromXContentBuilder;
@ -58,6 +61,7 @@ import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
@ -70,10 +74,6 @@ import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
*/
public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuilder<IndexMetaData>, ToXContent {

public static final IndexMetaData PROTO = IndexMetaData.builder("")
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1).numberOfReplicas(0).build();

public interface Custom extends Diffable<Custom>, ToXContent {

String type();
@ -152,14 +152,29 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
public static final String INDEX_SETTING_PREFIX = "index.";
public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards";
public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, false, Setting.Scope.INDEX);
public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas";
public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, true, Setting.Scope.INDEX);
public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas";
public static final Setting<Boolean> INDEX_SHADOW_REPLICAS_SETTING = Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, false, Setting.Scope.INDEX);

public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem";
public static final Setting<Boolean> INDEX_SHARED_FILESYSTEM_SETTING = Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, false, Setting.Scope.INDEX);

public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas";
public static final Setting<AutoExpandReplicas> INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING;
public static final String SETTING_READ_ONLY = "index.blocks.read_only";
public static final Setting<Boolean> INDEX_READ_ONLY_SETTING = Setting.boolSetting(SETTING_READ_ONLY, false, true, Setting.Scope.INDEX);

public static final String SETTING_BLOCKS_READ = "index.blocks.read";
public static final Setting<Boolean> INDEX_BLOCKS_READ_SETTING = Setting.boolSetting(SETTING_BLOCKS_READ, false, true, Setting.Scope.INDEX);

public static final String SETTING_BLOCKS_WRITE = "index.blocks.write";
public static final Setting<Boolean> INDEX_BLOCKS_WRITE_SETTING = Setting.boolSetting(SETTING_BLOCKS_WRITE, false, true, Setting.Scope.INDEX);

public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata";
public static final Setting<Boolean> INDEX_BLOCKS_METADATA_SETTING = Setting.boolSetting(SETTING_BLOCKS_METADATA, false, true, Setting.Scope.INDEX);

public static final String SETTING_VERSION_CREATED = "index.version.created";
public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string";
public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded";
@ -167,12 +182,23 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible";
public static final String SETTING_CREATION_DATE = "index.creation_date";
public static final String SETTING_PRIORITY = "index.priority";
public static final Setting<Integer> INDEX_PRIORITY_SETTING = Setting.intSetting("index.priority", 1, 0, true, Setting.Scope.INDEX);
public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string";
public static final String SETTING_INDEX_UUID = "index.uuid";
public static final String SETTING_DATA_PATH = "index.data_path";
public static final Setting<String> INDEX_DATA_PATH_SETTING = new Setting<>(SETTING_DATA_PATH, "", Function.identity(), false, Setting.Scope.INDEX);
public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
public static final Setting<Boolean> INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, true, Setting.Scope.INDEX);
public static final String INDEX_UUID_NA_VALUE = "_na_";

public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.require.", true, Setting.Scope.INDEX);
public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.include.", true, Setting.Scope.INDEX);
public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.exclude.", true, Setting.Scope.INDEX);

public static final IndexMetaData PROTO = IndexMetaData.builder("")
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1).numberOfReplicas(0).build();

public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";

private final int numberOfShards;
@ -627,10 +653,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return this;
}

public long creationDate() {
return settings.getAsLong(SETTING_CREATION_DATE, -1l);
}

public Builder settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
@ -645,11 +667,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return mappings.get(type);
}

public Builder removeMapping(String mappingType) {
mappings.remove(mappingType);
return this;
}

public Builder putMapping(String type, String source) throws IOException {
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
putMapping(new MappingMetaData(type, parser.mapOrdered()));
@ -692,24 +709,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return this;
}

public Builder removeCustom(String type) {
this.customs.remove(type);
return this;
}

public Custom getCustom(String type) {
return this.customs.get(type);
}

public Builder putActiveAllocationIds(int shardId, Set<String> allocationIds) {
activeAllocationIds.put(shardId, new HashSet<>(allocationIds));
return this;
}

public Set<String> getActiveAllocationIds(int shardId) {
return activeAllocationIds.get(shardId);
}

public long version() {
return this.version;
}
@ -758,22 +762,21 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
filledActiveAllocationIds.put(i, Collections.emptySet());
}
}

Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
final Map<String, String> requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(settings).getAsMap();
final DiscoveryNodeFilters requireFilters;
if (requireMap.isEmpty()) {
requireFilters = null;
} else {
requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
}
Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
Map<String, String> includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.get(settings).getAsMap();
final DiscoveryNodeFilters includeFilters;
if (includeMap.isEmpty()) {
includeFilters = null;
} else {
includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
}
Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
Map<String, String> excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.get(settings).getAsMap();
final DiscoveryNodeFilters excludeFilters;
if (excludeMap.isEmpty()) {
excludeFilters = null;
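
The routing filter lookups above now go through Setting.groupSetting, which collects every key under a prefix into a nested Settings object. A hedged sketch of that pattern; the factory call and accessors come from the diff, while the demo class and sample keys are invented here:

import java.util.Map;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

// Hypothetical demo of the group-setting pattern used for index.routing.allocation.*
public class GroupSettingDemo {
    public static void main(String[] args) {
        Setting<Settings> requireGroup =
            Setting.groupSetting("index.routing.allocation.require.", true, Setting.Scope.INDEX);
        Settings settings = Settings.settingsBuilder()
            .put("index.routing.allocation.require.zone", "us-east")
            .put("index.routing.allocation.require.box_type", "hot")
            .put("index.number_of_replicas", 2) // unrelated key, filtered out
            .build();
        // get() returns only the keys under the prefix, with the prefix stripped
        Map<String, String> requireMap = requireGroup.get(settings).getAsMap();
        System.out.println(requireMap); // e.g. {box_type=hot, zone=us-east}
    }
}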

@ -47,6 +47,7 @@ import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
@ -103,13 +104,14 @@ public class MetaDataCreateIndexService extends AbstractComponent {
private final IndexTemplateFilter indexTemplateFilter;
private final Environment env;
private final NodeServicesProvider nodeServicesProvider;
private final IndexScopedSettings indexScopedSettings;


@Inject
public MetaDataCreateIndexService(Settings settings, ClusterService clusterService,
IndicesService indicesService, AllocationService allocationService,
Version version, AliasValidator aliasValidator,
Set<IndexTemplateFilter> indexTemplateFilters, Environment env, NodeServicesProvider nodeServicesProvider) {
Set<IndexTemplateFilter> indexTemplateFilters, Environment env, NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings) {
super(settings);
this.clusterService = clusterService;
this.indicesService = indicesService;
@ -118,6 +120,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
this.aliasValidator = aliasValidator;
this.env = env;
this.nodeServicesProvider = nodeServicesProvider;
this.indexScopedSettings = indexScopedSettings;

if (indexTemplateFilters.isEmpty()) {
this.indexTemplateFilter = DEFAULT_INDEX_TEMPLATE_FILTER;
@ -174,6 +177,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
public void createIndex(final CreateIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
Settings.Builder updatedSettingsBuilder = Settings.settingsBuilder();
updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
indexScopedSettings.validate(updatedSettingsBuilder);
request.settings(updatedSettingsBuilder.build());

clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]",
@ -313,7 +317,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
// first, add the default mapping
if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
try {
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), false, request.updateAllTypes());
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
} catch (Exception e) {
removalReason = "failed on parsing default mapping on index creation";
throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, MapperService.DEFAULT_MAPPING, e.getMessage());
@ -325,7 +329,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
try {
// apply the default here, it's the first time we parse it
mapperService.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), true, request.updateAllTypes());
mapperService.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
} catch (Exception e) {
removalReason = "failed on parsing mappings on index creation";
throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
@ -460,16 +464,17 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}

List<String> getIndexSettingsValidationErrors(Settings settings) {
String customPath = settings.get(IndexMetaData.SETTING_DATA_PATH, null);
String customPath = IndexMetaData.INDEX_DATA_PATH_SETTING.get(settings);
List<String> validationErrors = new ArrayList<>();
if (customPath != null && env.sharedDataFile() == null) {
if (Strings.isEmpty(customPath) == false && env.sharedDataFile() == null) {
validationErrors.add("path.shared_data must be set in order to use custom data paths");
} else if (customPath != null) {
} else if (Strings.isEmpty(customPath) == false) {
Path resolvedPath = PathUtils.get(new Path[]{env.sharedDataFile()}, customPath);
if (resolvedPath == null) {
validationErrors.add("custom path [" + customPath + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]");
}
}
//norelease - this can be removed?
Integer number_of_primaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
Integer number_of_replicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null);
if (number_of_primaries != null && number_of_primaries <= 0) {
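
createIndex above normalizes user-supplied keys before validating them; as I read the call, normalizePrefix prepends "index." to any key that does not already carry it. A hedged sketch of that step (demo class invented for illustration):

import org.elasticsearch.common.settings.Settings;

// Hypothetical demo of the key normalization step in createIndex
public class NormalizePrefixDemo {
    public static void main(String[] args) {
        Settings normalized = Settings.settingsBuilder()
            .put("number_of_shards", 3)          // bare key, as a user might send it
            .put("index.number_of_replicas", 1)  // already prefixed, left alone
            .normalizePrefix("index.")
            .build();
        // both keys now live under the "index." prefix, so validation sees one shape
        System.out.println(normalized.get("index.number_of_shards"));   // 3
        System.out.println(normalized.get("index.number_of_replicas")); // 1
    }
}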

@ -104,12 +104,9 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
// temporarily create the index and add mappings so we can parse the filter
try {
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, false);
}
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
}
} catch (Exception e) {
logger.warn("[{}] failed to temporarily create index in order to apply alias action", e, indexMetaData.getIndex());

@ -21,7 +21,6 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.analysis.Analyzer;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -70,7 +69,6 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
}
checkSupportedVersion(indexMetaData);
IndexMetaData newMetaData = indexMetaData;
newMetaData = addDefaultUnitsIfNeeded(newMetaData);
checkMappingsCompatibility(newMetaData);
newMetaData = markAsUpgraded(newMetaData);
return newMetaData;
@ -113,103 +111,6 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
return false;
}

/** All known byte-sized settings for an index. */
public static final Set<String> INDEX_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
"index.merge.policy.floor_segment",
"index.merge.policy.max_merged_segment",
"index.merge.policy.max_merge_size",
"index.merge.policy.min_merge_size",
"index.shard.recovery.file_chunk_size",
"index.shard.recovery.translog_size",
"index.store.throttle.max_bytes_per_sec",
"index.translog.flush_threshold_size",
"index.translog.fs.buffer_size",
"index.version_map_size"));

/** All known time settings for an index. */
public static final Set<String> INDEX_TIME_SETTINGS = unmodifiableSet(newHashSet(
"index.gateway.wait_for_mapping_update_post_recovery",
"index.shard.wait_for_mapping_update_post_recovery",
"index.gc_deletes",
"index.indexing.slowlog.threshold.index.debug",
"index.indexing.slowlog.threshold.index.info",
"index.indexing.slowlog.threshold.index.trace",
"index.indexing.slowlog.threshold.index.warn",
"index.refresh_interval",
"index.search.slowlog.threshold.fetch.debug",
"index.search.slowlog.threshold.fetch.info",
"index.search.slowlog.threshold.fetch.trace",
"index.search.slowlog.threshold.fetch.warn",
"index.search.slowlog.threshold.query.debug",
"index.search.slowlog.threshold.query.info",
"index.search.slowlog.threshold.query.trace",
"index.search.slowlog.threshold.query.warn",
"index.shadow.wait_for_initial_commit",
"index.store.stats_refresh_interval",
"index.translog.flush_threshold_period",
"index.translog.interval",
"index.translog.sync_interval",
"index.shard.inactive_time",
UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING));

/**
* Elasticsearch 2.0 requires units on byte/memory and time settings; this method adds the default unit to any such settings that are
* missing units.
*/
private IndexMetaData addDefaultUnitsIfNeeded(IndexMetaData indexMetaData) {
if (indexMetaData.getCreationVersion().before(Version.V_2_0_0_beta1)) {
// TODO: can we somehow only do this *once* for a pre-2.0 index? Maybe we could stuff a "fake marker setting" here? Seems hackish...
// Created lazily if we find any settings that are missing units:
Settings settings = indexMetaData.getSettings();
Settings.Builder newSettings = null;
for(String byteSizeSetting : INDEX_BYTES_SIZE_SETTINGS) {
String value = settings.get(byteSizeSetting);
if (value != null) {
try {
Long.parseLong(value);
} catch (NumberFormatException nfe) {
continue;
}
// It's a naked number that previously would be interpreted as default unit (bytes); now we add it:
logger.warn("byte-sized index setting [{}] with value [{}] is missing units; assuming default units (b) but in future versions this will be a hard error", byteSizeSetting, value);
if (newSettings == null) {
newSettings = Settings.builder();
newSettings.put(settings);
}
newSettings.put(byteSizeSetting, value + "b");
}
}
for(String timeSetting : INDEX_TIME_SETTINGS) {
String value = settings.get(timeSetting);
if (value != null) {
try {
Long.parseLong(value);
} catch (NumberFormatException nfe) {
continue;
}
// It's a naked number that previously would be interpreted as default unit (ms); now we add it:
logger.warn("time index setting [{}] with value [{}] is missing units; assuming default units (ms) but in future versions this will be a hard error", timeSetting, value);
if (newSettings == null) {
newSettings = Settings.builder();
newSettings.put(settings);
}
newSettings.put(timeSetting, value + "ms");
}
}
if (newSettings != null) {
// At least one setting was changed:
return IndexMetaData.builder(indexMetaData)
.version(indexMetaData.getVersion())
.settings(newSettings.build())
.build();
}
}

// No changes:
return indexMetaData;
}


/**
* Checks the mappings for compatibility with the current version
*/
@ -217,14 +118,14 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
try {
// We cannot instantiate a real analysis service at this point because the node might not have
// been started yet. However, we don't really need real analyzers at this stage - so we can fake it
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.emptyList());
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings);
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());

try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null)) {
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
}
}
}
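
The deleted addDefaultUnitsIfNeeded logic above appended a default unit to any bare numeric value. A self-contained sketch of that rule for reference; the method and the map holding the settings are inventions of this example, not the removed implementation:

import java.util.HashMap;
import java.util.Map;

// Standalone illustration of the "add missing units" rule from the removed code:
// a value that parses as a bare long gets the default unit suffix appended.
public class DefaultUnitsDemo {
    static String withDefaultUnit(String value, String unit) {
        try {
            Long.parseLong(value);       // naked number -> needs a unit
        } catch (NumberFormatException nfe) {
            return value;                // already carries a unit (e.g. "512mb"), leave it
        }
        return value + unit;
    }

    public static void main(String[] args) {
        Map<String, String> timeSettings = new HashMap<>();
        timeSettings.put("index.refresh_interval", "1000");  // naked, becomes "1000ms"
        timeSettings.put("index.gc_deletes", "60s");         // already unit-ed, untouched
        timeSettings.replaceAll((k, v) -> withDefaultUnit(v, "ms"));
        System.out.println(timeSettings.get("index.refresh_interval")); // 1000ms
        System.out.println(timeSettings.get("index.gc_deletes"));       // 60s
    }
}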

@ -143,7 +143,7 @@ public class MetaDataMappingService extends AbstractComponent {
removeIndex = true;
for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), false, true);
indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
}
}

@ -223,7 +223,7 @@ public class MetaDataMappingService extends AbstractComponent {
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
// add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), false, request.updateAllTypes());
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
}
}
}
@ -303,7 +303,7 @@ public class MetaDataMappingService extends AbstractComponent {
if (existingMapper != null) {
existingSource = existingMapper.mappingSource();
}
DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, true, request.updateAllTypes());
DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
CompressedXContent updatedSource = mergedMapper.mappingSource();

if (existingSource != null) {
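
All the merge call sites in this commit replace an opaque boolean with MapperService.MergeReason, distinguishing a user-driven mapping update from replaying already-accepted mappings on recovery. A hedged sketch of why the enum reads better than the old flag; the two constants mirror the diff, but the demo method and its strictness logic are inventions of this example:

// Hypothetical demo; MAPPING_UPDATE and MAPPING_RECOVERY mirror MapperService.MergeReason.
public class MergeReasonDemo {
    enum MergeReason { MAPPING_UPDATE, MAPPING_RECOVERY }

    // Recovery replays mappings that were accepted once already, so (in this sketch)
    // stricter update-only checks can be skipped; an update must be fully validated.
    static void merge(String type, String source, MergeReason reason) {
        boolean applyStrictChecks = (reason == MergeReason.MAPPING_UPDATE);
        System.out.println("merging [" + type + "] reason=" + reason + " strict=" + applyStrictChecks);
    }

    public static void main(String[] args) {
        // call sites now state intent, unlike the old merge(type, source, false, true)
        merge("my_type", "{}", MergeReason.MAPPING_UPDATE);
        merge("my_type", "{}", MergeReason.MAPPING_RECOVERY);
    }
}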

@ -30,19 +30,20 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.settings.IndexDynamicSettings;
import org.elasticsearch.index.IndexNotFoundException;

import java.util.ArrayList;
import java.util.HashMap;
@ -59,25 +60,21 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
*/
public class MetaDataUpdateSettingsService extends AbstractComponent implements ClusterStateListener {

// the value we recognize in the "max" position to mean all the nodes
private static final String ALL_NODES_VALUE = "all";

private final ClusterService clusterService;

private final AllocationService allocationService;

private final DynamicSettings dynamicSettings;

private final IndexNameExpressionResolver indexNameExpressionResolver;
private final IndexScopedSettings indexScopedSettings;

@Inject
public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, @IndexDynamicSettings DynamicSettings dynamicSettings, IndexNameExpressionResolver indexNameExpressionResolver) {
public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, IndexScopedSettings indexScopedSettings, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings);
this.clusterService = clusterService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.clusterService.add(this);
this.allocationService = allocationService;
this.dynamicSettings = dynamicSettings;
this.indexScopedSettings = indexScopedSettings;
}

@Override
@ -90,69 +87,43 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
final int dataNodeCount = event.state().nodes().dataNodes().size();

Map<Integer, List<String>> nrReplicasChanged = new HashMap<>();

// we need to do this each time in case it was changed by update settings
for (final IndexMetaData indexMetaData : event.state().metaData()) {
String autoExpandReplicas = indexMetaData.getSettings().get(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS);
if (autoExpandReplicas != null && Booleans.parseBoolean(autoExpandReplicas, true)) { // Booleans only work for false values, just as we want it here
try {
final int min;
final int max;
AutoExpandReplicas autoExpandReplicas = IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(indexMetaData.getSettings());
if (autoExpandReplicas.isEnabled()) {
/*
* we have to expand the number of replicas for this index to at least min and at most max nodes here
* so we bump it up or reduce it depending on min/max and the number of data nodes.
* If we change the number of replicas we just let the shard allocator do its thing once we have updated it,
* since it goes through the index metadata to figure out whether something needs to be done anyway. To do that
* we issue a cluster settings update command below which kicks off a reroute.
*/
final int min = autoExpandReplicas.getMinReplicas();
final int max = autoExpandReplicas.getMaxReplicas(dataNodeCount);
int numberOfReplicas = dataNodeCount - 1;
if (numberOfReplicas < min) {
numberOfReplicas = min;
} else if (numberOfReplicas > max) {
numberOfReplicas = max;
}
// same value, nothing to do there
if (numberOfReplicas == indexMetaData.getNumberOfReplicas()) {
continue;
}

final int dash = autoExpandReplicas.indexOf('-');
if (-1 == dash) {
logger.warn("failed to set [{}] for index [{}], it should be dash delimited [{}]",
IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), autoExpandReplicas);
continue;
}
final String sMin = autoExpandReplicas.substring(0, dash);
try {
min = Integer.parseInt(sMin);
} catch (NumberFormatException e) {
logger.warn("failed to set [{}] for index [{}], minimum value is not a number [{}]",
e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), sMin);
continue;
}
String sMax = autoExpandReplicas.substring(dash + 1);
if (sMax.equals(ALL_NODES_VALUE)) {
max = dataNodeCount - 1;
} else {
try {
max = Integer.parseInt(sMax);
} catch (NumberFormatException e) {
logger.warn("failed to set [{}] for index [{}], maximum value is neither [{}] nor a number [{}]",
e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), ALL_NODES_VALUE, sMax);
continue;
}
if (numberOfReplicas >= min && numberOfReplicas <= max) {

if (!nrReplicasChanged.containsKey(numberOfReplicas)) {
nrReplicasChanged.put(numberOfReplicas, new ArrayList<>());
}

int numberOfReplicas = dataNodeCount - 1;
if (numberOfReplicas < min) {
numberOfReplicas = min;
} else if (numberOfReplicas > max) {
numberOfReplicas = max;
}

// same value, nothing to do there
if (numberOfReplicas == indexMetaData.getNumberOfReplicas()) {
continue;
}

if (numberOfReplicas >= min && numberOfReplicas <= max) {

if (!nrReplicasChanged.containsKey(numberOfReplicas)) {
nrReplicasChanged.put(numberOfReplicas, new ArrayList<String>());
}

nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex());
}
} catch (Exception e) {
logger.warn("[{}] failed to parse auto expand replicas", e, indexMetaData.getIndex());
nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex());
}
}
}

if (nrReplicasChanged.size() > 0) {
// update settings and kick off a reroute (implicit) for them to take effect
for (final Integer fNumberOfReplicas : nrReplicasChanged.keySet()) {
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, fNumberOfReplicas).build();
final List<String> indices = nrReplicasChanged.get(fNumberOfReplicas);
@ -182,42 +153,30 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
}

public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
Settings.Builder updatedSettingsBuilder = Settings.settingsBuilder();
updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
final Settings normalizedSettings = Settings.settingsBuilder().put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
Settings.Builder settingsForClosedIndices = Settings.builder();
Settings.Builder settingsForOpenIndices = Settings.builder();
Settings.Builder skippedSettingsBuilder = Settings.builder();

indexScopedSettings.validate(normalizedSettings);
// never allow the number of shards to be changed
for (String key : updatedSettingsBuilder.internalMap().keySet()) {
if (key.equals(IndexMetaData.SETTING_NUMBER_OF_SHARDS)) {
for (Map.Entry<String, String> entry : normalizedSettings.getAsMap().entrySet()) {
if (entry.getKey().equals(IndexMetaData.SETTING_NUMBER_OF_SHARDS)) {
listener.onFailure(new IllegalArgumentException("can't change the number of shards for an index"));
return;
}
}

final Settings closeSettings = updatedSettingsBuilder.build();

final Set<String> removedSettings = new HashSet<>();
final Set<String> errors = new HashSet<>();
for (Map.Entry<String, String> setting : updatedSettingsBuilder.internalMap().entrySet()) {
if (!dynamicSettings.hasDynamicSetting(setting.getKey())) {
removedSettings.add(setting.getKey());
Setting setting = indexScopedSettings.get(entry.getKey());
assert setting != null; // we already validated the normalized settings
settingsForClosedIndices.put(entry.getKey(), entry.getValue());
if (setting.isDynamic()) {
settingsForOpenIndices.put(entry.getKey(), entry.getValue());
} else {
String error = dynamicSettings.validateDynamicSetting(setting.getKey(), setting.getValue(), clusterService.state());
if (error != null) {
errors.add("[" + setting.getKey() + "] - " + error);
}
skippedSettingsBuilder.put(entry.getKey(), entry.getValue());
}
}

if (!errors.isEmpty()) {
listener.onFailure(new IllegalArgumentException("can't process the settings: " + errors.toString()));
return;
}

if (!removedSettings.isEmpty()) {
for (String removedSetting : removedSettings) {
updatedSettingsBuilder.remove(removedSetting);
}
}
final Settings openSettings = updatedSettingsBuilder.build();
final Settings skippedSettings = skippedSettingsBuilder.build();
final Settings closedSettings = settingsForClosedIndices.build();
final Settings openSettings = settingsForOpenIndices.build();

clusterService.submitStateUpdateTask("update-settings",
new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@ -245,16 +204,16 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
}
}

if (closeIndices.size() > 0 && closeSettings.get(IndexMetaData.SETTING_NUMBER_OF_REPLICAS) != null) {
if (closeIndices.size() > 0 && closedSettings.get(IndexMetaData.SETTING_NUMBER_OF_REPLICAS) != null) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Can't update [%s] on closed indices [%s] - it can leave the index in an unopenable state", IndexMetaData.SETTING_NUMBER_OF_REPLICAS,
closeIndices
));
}
if (!removedSettings.isEmpty() && !openIndices.isEmpty()) {
if (!skippedSettings.getAsMap().isEmpty() && !openIndices.isEmpty()) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Can't update non-dynamic settings [%s] for open indices [%s]",
removedSettings,
skippedSettings.getAsMap().keySet(),
openIndices
));
}
@ -267,57 +226,37 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
}

ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
Boolean updatedReadOnly = openSettings.getAsBoolean(IndexMetaData.SETTING_READ_ONLY, null);
if (updatedReadOnly != null) {
for (String index : actualIndices) {
if (updatedReadOnly) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK);
}
}
}
Boolean updateMetaDataBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, null);
if (updateMetaDataBlock != null) {
for (String index : actualIndices) {
if (updateMetaDataBlock) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_METADATA_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_METADATA_BLOCK);
}
}
}

Boolean updateWriteBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, null);
if (updateWriteBlock != null) {
for (String index : actualIndices) {
if (updateWriteBlock) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_WRITE_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_WRITE_BLOCK);
}
}
}

Boolean updateReadBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, null);
if (updateReadBlock != null) {
for (String index : actualIndices) {
if (updateReadBlock) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_READ_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_READ_BLOCK);
}
}
}
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_BLOCK, IndexMetaData.INDEX_READ_ONLY_SETTING, openSettings);
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_METADATA_BLOCK, IndexMetaData.INDEX_BLOCKS_METADATA_SETTING, openSettings);
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_WRITE_BLOCK, IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, openSettings);
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings);

if (!openIndices.isEmpty()) {
String[] indices = openIndices.toArray(new String[openIndices.size()]);
metaDataBuilder.updateSettings(openSettings, indices);
for (String index : openIndices) {
IndexMetaData indexMetaData = metaDataBuilder.get(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
Settings.Builder updates = Settings.builder();
Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index)) {
metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings));
}
}
}

if (!closeIndices.isEmpty()) {
String[] indices = closeIndices.toArray(new String[closeIndices.size()]);
metaDataBuilder.updateSettings(closeSettings, indices);
for (String index : closeIndices) {
IndexMetaData indexMetaData = metaDataBuilder.get(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
Settings.Builder updates = Settings.builder();
Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index)) {
metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings));
}
}
}

@ -326,12 +265,34 @@
// now, reroute in case things change that require it (like number of replicas)
RoutingAllocation.Result routingResult = allocationService.reroute(updatedState, "settings update");
updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();

for (String index : openIndices) {
indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings());
}
for (String index : closeIndices) {
indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings());
}
return updatedState;
}
});
}

/**
* Updates the cluster block only if the setting exists in the given settings
*/
private static void maybeUpdateClusterBlock(String[] actualIndices, ClusterBlocks.Builder blocks, ClusterBlock block, Setting<Boolean> setting, Settings openSettings) {
if (setting.exists(openSettings)) {
final boolean updateBlock = setting.get(openSettings);
for (String index : actualIndices) {
if (updateBlock) {
blocks.addIndexBlock(index, block);
} else {
blocks.removeIndexBlock(index, block);
}
}
}
}
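
maybeUpdateClusterBlock only acts when the caller actually supplied the setting, so an absent key never clears an existing block. A plain-Java sketch of that exists-then-apply guard; the types here are stand-ins for the Elasticsearch classes, not the real API:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Stand-in types: the Map plays the role of Settings, the Set the role of the
// blocks builder. Only an explicitly supplied value adds or removes the block.
public class MaybeUpdateBlockDemo {
    static void maybeUpdateBlock(Map<String, Boolean> settings, Set<String> blocks,
                                 String blockName, String settingKey) {
        if (!settings.containsKey(settingKey)) {
            return; // setting absent: leave the current block state untouched
        }
        if (settings.get(settingKey)) {
            blocks.add(blockName);
        } else {
            blocks.remove(blockName);
        }
    }

    public static void main(String[] args) {
        Set<String> blocks = new HashSet<>();
        blocks.add("index.read_only");
        Map<String, Boolean> update = new HashMap<>(); // no keys supplied
        maybeUpdateBlock(update, blocks, "index.read_only", "index.blocks.read_only");
        System.out.println(blocks); // [index.read_only] - untouched
        update.put("index.blocks.read_only", false);
        maybeUpdateBlock(update, blocks, "index.read_only", "index.blocks.read_only");
        System.out.println(blocks); // [] - explicitly cleared
    }
}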


public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {

@ -319,7 +319,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
throw new IllegalArgumentException("resolved [" + node + "] into [" + resolvedNodeIds.length + "] nodes, but it was expected to resolve to a single node");
}
if (resolvedNodeIds.length == 0) {
throw new IllegalArgumentException("failed to resolve [" + node + " ], no matching nodes");
throw new IllegalArgumentException("failed to resolve [" + node + "], no matching nodes");
}
return nodes.get(resolvedNodeIds[0]);
}
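
The hunk above only tightens an error message, but the surrounding contract is worth a note: node resolution must yield exactly one match. A plain-Java sketch of that contract (the helper and its class are invented for illustration):

// Hypothetical helper illustrating the resolve-exactly-one contract above.
public class ResolveSingleDemo {
    static String resolveSingle(String expression, String[] resolved) {
        if (resolved.length > 1) {
            throw new IllegalArgumentException(
                "resolved [" + expression + "] into [" + resolved.length + "] nodes, expected a single node");
        }
        if (resolved.length == 0) {
            throw new IllegalArgumentException("failed to resolve [" + expression + "], no matching nodes");
        }
        return resolved[0];
    }

    public static void main(String[] args) {
        System.out.println(resolveSingle("node-1", new String[]{"abc123"})); // abc123
    }
}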

@ -114,6 +114,16 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
return shard;
}

/**
* All shards for the provided {@link ShardId}
* @return All the shard routing entries for the given index and shard id
* @throws IndexNotFoundException if provided index does not exist
* @throws ShardNotFoundException if provided shard id is unknown
*/
public IndexShardRoutingTable shardRoutingTable(ShardId shardId) {
return shardRoutingTable(shardId.getIndex(), shardId.getId());
}

public RoutingTable validateRaiseException(MetaData metaData) throws RoutingValidationException {
RoutingTableValidation validation = validate(metaData);
if (!validation.valid()) {
@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
@ -41,10 +42,10 @@ import java.io.IOException;
public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {

public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime");

public static final String INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = "index.unassigned.node_left.delayed_timeout";
private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1);

public static final Setting<TimeValue> INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, true, Setting.Scope.INDEX);

/**
* Reason why the shard is in unassigned state.
* <p>
@ -215,7 +216,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
if (reason != Reason.NODE_LEFT) {
return 0;
}
TimeValue delayTimeout = indexSettings.getAsTime(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, settings.getAsTime(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, DEFAULT_DELAYED_NODE_LEFT_TIMEOUT));
TimeValue delayTimeout = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings, settings);
return Math.max(0L, delayTimeout.nanos());
}
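
The new typed setting collapses the nested getAsTime fallback into a single two-argument get(primary, fallback) call. A minimal sketch of that lookup order; the factory call and the two-argument accessor are taken from the diff, while the demo class and sample values are invented:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

// Hypothetical demo of an index-level time setting falling back to node settings.
public class DelayedTimeoutDemo {
    public static void main(String[] args) {
        Setting<TimeValue> delayed = Setting.timeSetting(
            "index.unassigned.node_left.delayed_timeout", TimeValue.timeValueMinutes(1), true, Setting.Scope.INDEX);
        Settings indexSettings = Settings.EMPTY; // nothing set on the index itself
        Settings nodeSettings = Settings.settingsBuilder()
            .put("index.unassigned.node_left.delayed_timeout", "30s")
            .build();
        // index settings win when present; otherwise the node-level value, then the default
        System.out.println(delayed.get(indexSettings, nodeSettings)); // 30s (node-level value)
    }
}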

@ -0,0 +1,240 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.routing.allocation.command;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.StreamableReader;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.util.function.Consumer;

/**
* Abstract base class for allocating an unassigned shard to a node
*/
public abstract class AbstractAllocateAllocationCommand implements AllocationCommand, ToXContent {

private static final String INDEX_KEY = "index";
private static final String SHARD_KEY = "shard";
private static final String NODE_KEY = "node";

protected static <T extends Builder> ObjectParser<T, Void> createAllocateParser(String command) {
ObjectParser<T, Void> parser = new ObjectParser<>(command);
parser.declareString(Builder::setIndex, new ParseField(INDEX_KEY));
parser.declareInt(Builder::setShard, new ParseField(SHARD_KEY));
parser.declareString(Builder::setNode, new ParseField(NODE_KEY));
return parser;
}

protected static abstract class Builder<T extends AbstractAllocateAllocationCommand> implements StreamableReader<Builder<T>> {
protected String index;
protected int shard = -1;
protected String node;

public void setIndex(String index) {
this.index = index;
}

public void setShard(int shard) {
this.shard = shard;
}

public void setNode(String node) {
this.node = node;
}

@Override
public Builder<T> readFrom(StreamInput in) throws IOException {
index = in.readString();
shard = in.readVInt();
node = in.readString();
return this;
}

public abstract Builder<T> parse(XContentParser parser) throws IOException;

public abstract T build();

protected void validate() {
if (index == null) {
throw new IllegalArgumentException("Argument [index] must be defined");
}
if (shard < 0) {
throw new IllegalArgumentException("Argument [shard] must be defined and non-negative");
}
if (node == null) {
throw new IllegalArgumentException("Argument [node] must be defined");
}
}
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.field(INDEX_KEY, shardId().index().name());
builder.field(SHARD_KEY, shardId().id());
builder.field(NODE_KEY, node());
return builder;
}

public void writeTo(StreamOutput out) throws IOException {
out.writeString(shardId.getIndex());
out.writeVInt(shardId.getId());
out.writeString(node);
}

public static abstract class Factory<T extends AbstractAllocateAllocationCommand> implements AllocationCommand.Factory<T> {

protected abstract Builder<T> newBuilder();

@Override
public T readFrom(StreamInput in) throws IOException {
return newBuilder().readFrom(in).build();
}

@Override
public void writeTo(T command, StreamOutput out) throws IOException {
command.writeTo(out);
}

@Override
public T fromXContent(XContentParser parser) throws IOException {
return newBuilder().parse(parser).build();
}

@Override
public void toXContent(T command, XContentBuilder builder, ToXContent.Params params, String objectName) throws IOException {
if (objectName == null) {
builder.startObject();
} else {
builder.startObject(objectName);
}
builder.endObject();
}
}

protected final ShardId shardId;
protected final String node;

protected AbstractAllocateAllocationCommand(ShardId shardId, String node) {
this.shardId = shardId;
this.node = node;
}

/**
* Get the shard id
*
* @return id of the shard
*/
public ShardId shardId() {
return this.shardId;
}

/**
* Get the id of the node
*
* @return id of the node
*/
public String node() {
return this.node;
}

/**
* Handle the case where a discovery node cannot be found in the routing table. Usually means that it's not a data node.
*/
protected RerouteExplanation explainOrThrowMissingRoutingNode(RoutingAllocation allocation, boolean explain, DiscoveryNode discoNode) {
if (!discoNode.dataNode()) {
return explainOrThrowRejectedCommand(explain, allocation, "allocation can only be done on data nodes, not [" + node + "]");
} else {
return explainOrThrowRejectedCommand(explain, allocation, "could not find [" + node + "] among the routing nodes");
}
}

/**
* Utility method for rejecting the current allocation command based on provided reason
*/
protected RerouteExplanation explainOrThrowRejectedCommand(boolean explain, RoutingAllocation allocation, String reason) {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, name() + " (allocation command)", reason));
}
throw new IllegalArgumentException("[" + name() + "] " + reason);
}

/**
* Utility method for rejecting the current allocation command based on provided exception
*/
protected RerouteExplanation explainOrThrowRejectedCommand(boolean explain, RoutingAllocation allocation, RuntimeException rte) {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, name() + " (allocation command)", rte.getMessage()));
}
throw rte;
}

/**
* Initializes an unassigned shard on a node and removes it from the unassigned list
*
* @param allocation the allocation
* @param routingNodes the routing nodes
* @param routingNode the node to initialize it to
* @param shardRouting the shard routing that is to be matched in unassigned shards
*/
protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode, ShardRouting shardRouting) {
initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, null);
}

/**
* Initializes an unassigned shard on a node and removes it from the unassigned list
*
* @param allocation the allocation
* @param routingNodes the routing nodes
* @param routingNode the node to initialize it to
* @param shardRouting the shard routing that is to be matched in unassigned shards
* @param shardRoutingChanges changes to apply for shard routing in unassigned shards before initialization
*/
protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode,
ShardRouting shardRouting, @Nullable Consumer<ShardRouting> shardRoutingChanges) {
for (RoutingNodes.UnassignedShards.UnassignedIterator it = routingNodes.unassigned().iterator(); it.hasNext(); ) {
ShardRouting unassigned = it.next();
if (!unassigned.equalsIgnoringMetaData(shardRouting)) {
continue;
}
if (shardRoutingChanges != null) {
shardRoutingChanges.accept(unassigned);
}
it.initialize(routingNode.nodeId(), unassigned.version(),
allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
return;
}
assert false : "shard to initialize not found in list of unassigned shards";
}
}
|
|
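The explain flag selects between the two failure modes implemented by the helpers above: with explain enabled, a rejected command is returned as a RerouteExplanation carrying a NO decision; without it, the same rejection is thrown. A minimal sketch, assuming a concrete command instance and a RoutingAllocation are already in scope:

// dry-run style: rejections come back as a RerouteExplanation wrapping Decision.NO
RerouteExplanation dryRun = command.execute(allocation, true);
// strict style: the same rejection surfaces as an exception, e.g. IllegalArgumentException
RerouteExplanation applied = command.execute(allocation, false);
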
@ -1,240 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.routing.allocation.command;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;

/**
* Allocates an unassigned shard to a specific node. Note, primary allocation will "force"
* allocation, which might mean one will lose data if using the local gateway..., use with care
* with the <tt>allowPrimary</tt> flag.
*/
public class AllocateAllocationCommand implements AllocationCommand {

public static final String NAME = "allocate";

public static class Factory implements AllocationCommand.Factory<AllocateAllocationCommand> {

@Override
public AllocateAllocationCommand readFrom(StreamInput in) throws IOException {
return new AllocateAllocationCommand(ShardId.readShardId(in), in.readString(), in.readBoolean());
}

@Override
public void writeTo(AllocateAllocationCommand command, StreamOutput out) throws IOException {
command.shardId().writeTo(out);
out.writeString(command.node());
out.writeBoolean(command.allowPrimary());
}

@Override
public AllocateAllocationCommand fromXContent(XContentParser parser) throws IOException {
String index = null;
int shardId = -1;
String nodeId = null;
boolean allowPrimary = false;

String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if ("index".equals(currentFieldName)) {
index = parser.text();
} else if ("shard".equals(currentFieldName)) {
shardId = parser.intValue();
} else if ("node".equals(currentFieldName)) {
nodeId = parser.text();
} else if ("allow_primary".equals(currentFieldName) || "allowPrimary".equals(currentFieldName)) {
allowPrimary = parser.booleanValue();
} else {
throw new ElasticsearchParseException("[{}] command does not support field [{}]", NAME, currentFieldName);
}
} else {
throw new ElasticsearchParseException("[{}] command does not support complex json tokens [{}]", NAME, token);
}
}
if (index == null) {
throw new ElasticsearchParseException("[{}] command missing the index parameter", NAME);
}
if (shardId == -1) {
throw new ElasticsearchParseException("[{}] command missing the shard parameter", NAME);
}
if (nodeId == null) {
throw new ElasticsearchParseException("[{}] command missing the node parameter", NAME);
}
return new AllocateAllocationCommand(new ShardId(index, shardId), nodeId, allowPrimary);
}

@Override
public void toXContent(AllocateAllocationCommand command, XContentBuilder builder, ToXContent.Params params, String objectName) throws IOException {
if (objectName == null) {
builder.startObject();
} else {
builder.startObject(objectName);
}
builder.field("index", command.shardId().index().name());
builder.field("shard", command.shardId().id());
builder.field("node", command.node());
builder.field("allow_primary", command.allowPrimary());
builder.endObject();
}
}

private final ShardId shardId;
private final String node;
private final boolean allowPrimary;

/**
* Create a new {@link AllocateAllocationCommand}
*
* @param shardId {@link ShardId} of the shard to assign
* @param node Node to assign the shard to
* @param allowPrimary should the node be allowed to allocate the shard as primary
*/
public AllocateAllocationCommand(ShardId shardId, String node, boolean allowPrimary) {
this.shardId = shardId;
this.node = node;
this.allowPrimary = allowPrimary;
}

@Override
public String name() {
return NAME;
}

/**
* Get the shard's id
*
* @return id of the shard
*/
public ShardId shardId() {
return this.shardId;
}

/**
* Get the id of the node
*
* @return id of the node
*/
public String node() {
return this.node;
}

/**
* Determine if primary allocation is allowed
*
* @return <code>true</code> if primary allocation is allowed. Otherwise <code>false</code>
*/
public boolean allowPrimary() {
return this.allowPrimary;
}

@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
final DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
final RoutingNodes routingNodes = allocation.routingNodes();

ShardRouting shardRouting = null;
for (ShardRouting routing : routingNodes.unassigned()) {
if (routing.shardId().equals(shardId)) {
// prefer primaries first to allocate
if (shardRouting == null || routing.primary()) {
shardRouting = routing;
}
}
}

if (shardRouting == null) {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command",
"failed to find " + shardId + " on the list of unassigned shards"));
}
throw new IllegalArgumentException("[allocate] failed to find " + shardId + " on the list of unassigned shards");
}

if (shardRouting.primary() && !allowPrimary) {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command",
"trying to allocate a primary shard " + shardId + ", which is disabled"));
}
throw new IllegalArgumentException("[allocate] trying to allocate a primary shard " + shardId + ", which is disabled");
}

RoutingNode routingNode = routingNodes.node(discoNode.id());
if (routingNode == null) {
if (!discoNode.dataNode()) {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command",
"Allocation can only be done on data nodes, not [" + node + "]"));
}
throw new IllegalArgumentException("Allocation can only be done on data nodes, not [" + node + "]");
} else {
if (explain) {
return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command",
"Could not find [" + node + "] among the routing nodes"));
}
throw new IllegalStateException("Could not find [" + node + "] among the routing nodes");
}
}

Decision decision = allocation.deciders().canAllocate(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
if (explain) {
return new RerouteExplanation(this, decision);
}
throw new IllegalArgumentException("[allocate] allocation of " + shardId + " on node " + discoNode + " is not allowed, reason: " + decision);
}
// go over and remove it from the unassigned
for (RoutingNodes.UnassignedShards.UnassignedIterator it = routingNodes.unassigned().iterator(); it.hasNext(); ) {
ShardRouting unassigned = it.next();
if (unassigned != shardRouting) {
continue;
}
// if we force allocation of a primary, we need to move the unassigned info back to treat it as if
// it was index creation
if (unassigned.primary() && unassigned.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
unassigned.updateUnassignedInfo(new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
"force allocation from previous reason " + unassigned.unassignedInfo().getReason() + ", " + unassigned.unassignedInfo().getMessage(),
unassigned.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis()));
}
it.initialize(routingNode.nodeId(), unassigned.version(), allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
break;
}
return new RerouteExplanation(this, decision);
}
}

@ -0,0 +1,125 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.routing.allocation.command;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;

import java.io.IOException;

/**
* Allocates an unassigned empty primary shard to a specific node. Use with extreme care as this will result in data loss.
* Allocation deciders are ignored.
*/
public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocationCommand {
public static final String NAME = "allocate_empty_primary";

private static final ObjectParser<Builder, Void> EMPTY_PRIMARY_PARSER = BasePrimaryAllocationCommand.createAllocatePrimaryParser(NAME);

/**
* Creates a new {@link AllocateEmptyPrimaryAllocationCommand}
*
* @param shardId {@link ShardId} of the shard to assign
* @param node node id of the node to assign the shard to
* @param acceptDataLoss whether the user agrees to data loss
*/
public AllocateEmptyPrimaryAllocationCommand(ShardId shardId, String node, boolean acceptDataLoss) {
super(shardId, node, acceptDataLoss);
}

@Override
public String name() {
return NAME;
}

public static class Builder extends BasePrimaryAllocationCommand.Builder<AllocateEmptyPrimaryAllocationCommand> {

@Override
public Builder parse(XContentParser parser) throws IOException {
return EMPTY_PRIMARY_PARSER.parse(parser, this);
}

@Override
public AllocateEmptyPrimaryAllocationCommand build() {
validate();
return new AllocateEmptyPrimaryAllocationCommand(new ShardId(index, shard), node, acceptDataLoss);
}
}

public static class Factory extends AbstractAllocateAllocationCommand.Factory<AllocateEmptyPrimaryAllocationCommand> {

@Override
protected Builder newBuilder() {
return new Builder();
}
}

@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
final DiscoveryNode discoNode;
try {
discoNode = allocation.nodes().resolveNode(node);
} catch (IllegalArgumentException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
final RoutingNodes routingNodes = allocation.routingNodes();
RoutingNode routingNode = routingNodes.node(discoNode.id());
if (routingNode == null) {
return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
}

final ShardRouting shardRouting;
try {
shardRouting = allocation.routingTable().shardRoutingTable(shardId).primaryShard();
} catch (IndexNotFoundException | ShardNotFoundException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
if (shardRouting.unassigned() == false) {
return explainOrThrowRejectedCommand(explain, allocation, "primary " + shardId + " is already assigned");
}

if (shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && acceptDataLoss == false) {
return explainOrThrowRejectedCommand(explain, allocation,
"allocating an empty primary for " + shardId + " can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
}

initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting,
shr -> {
if (shr.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
// we need to move the unassigned info back to treat it as if it was index creation
shr.updateUnassignedInfo(new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
"force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
shardRouting.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis()));
}
});
return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
}
}

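A minimal usage sketch for the command above; the index and node names are placeholders. As the execute method shows, forcing an empty primary is rejected unless the caller explicitly accepts data loss:

ShardId shardId = new ShardId("my-index", 0);
AllocateEmptyPrimaryAllocationCommand command =
    new AllocateEmptyPrimaryAllocationCommand(shardId, "node-1", true); // acceptDataLoss == true
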
@ -0,0 +1,131 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.routing.allocation.command;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;

import java.io.IOException;
import java.util.List;

/**
* Allocates an unassigned replica shard to a specific node. Checks if allocation deciders allow allocation.
*/
public class AllocateReplicaAllocationCommand extends AbstractAllocateAllocationCommand {
public static final String NAME = "allocate_replica";

private static final ObjectParser<AllocateReplicaAllocationCommand.Builder, Void> REPLICA_PARSER = createAllocateParser(NAME);

/**
* Creates a new {@link AllocateReplicaAllocationCommand}
*
* @param shardId {@link ShardId} of the shard to assign
* @param node node id of the node to assign the shard to
*/
public AllocateReplicaAllocationCommand(ShardId shardId, String node) {
super(shardId, node);
}

@Override
public String name() {
return NAME;
}

protected static class Builder extends AbstractAllocateAllocationCommand.Builder<AllocateReplicaAllocationCommand> {

@Override
public Builder parse(XContentParser parser) throws IOException {
return REPLICA_PARSER.parse(parser, this);
}

@Override
public AllocateReplicaAllocationCommand build() {
validate();
return new AllocateReplicaAllocationCommand(new ShardId(index, shard), node);
}
}

public static class Factory extends AbstractAllocateAllocationCommand.Factory<AllocateReplicaAllocationCommand> {
@Override
protected Builder newBuilder() {
return new Builder();
}
}

@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
final DiscoveryNode discoNode;
try {
discoNode = allocation.nodes().resolveNode(node);
} catch (IllegalArgumentException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
final RoutingNodes routingNodes = allocation.routingNodes();
RoutingNode routingNode = routingNodes.node(discoNode.id());
if (routingNode == null) {
return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
}

final ShardRouting primaryShardRouting;
try {
primaryShardRouting = allocation.routingTable().shardRoutingTable(shardId).primaryShard();
} catch (IndexNotFoundException | ShardNotFoundException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
if (primaryShardRouting.unassigned()) {
return explainOrThrowRejectedCommand(explain, allocation,
"trying to allocate a replica shard " + shardId + ", while corresponding primary shard is still unassigned");
}

List<ShardRouting> replicaShardRoutings = allocation.routingTable().shardRoutingTable(shardId).replicaShardsWithState(ShardRoutingState.UNASSIGNED);
ShardRouting shardRouting;
if (replicaShardRoutings.isEmpty()) {
return explainOrThrowRejectedCommand(explain, allocation,
"all copies of " + shardId + " are already assigned. Use the move allocation command instead");
} else {
shardRouting = replicaShardRoutings.get(0);
}

Decision decision = allocation.deciders().canAllocate(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
// don't use explainOrThrowRejectedCommand to keep the original "NO" decision
if (explain) {
return new RerouteExplanation(this, decision);
}
throw new IllegalArgumentException("[" + name() + "] allocation of " + shardId + " on node " + discoNode + " is not allowed, reason: " + decision);
}

initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting);
return new RerouteExplanation(this, decision);
}

}

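Unlike the two primary variants, this command runs the allocation deciders; a short sketch with placeholder names, assuming a RoutingAllocation is in scope:

AllocateReplicaAllocationCommand command =
    new AllocateReplicaAllocationCommand(new ShardId("my-index", 0), "node-2");
// with explain == true a Decision.NO from the deciders is reported, not thrown
RerouteExplanation explanation = command.execute(allocation, true);
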
@ -0,0 +1,124 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.routing.allocation.command;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;

import java.io.IOException;

/**
* Allocates an unassigned stale primary shard to a specific node. Use with extreme care as this will result in data loss.
* Allocation deciders are ignored.
*/
public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocationCommand {
public static final String NAME = "allocate_stale_primary";

private static final ObjectParser<Builder, Void> STALE_PRIMARY_PARSER = BasePrimaryAllocationCommand.createAllocatePrimaryParser(NAME);

/**
* Creates a new {@link AllocateStalePrimaryAllocationCommand}
*
* @param shardId {@link ShardId} of the shard to assign
* @param node node id of the node to assign the shard to
* @param acceptDataLoss whether the user agrees to data loss
*/
public AllocateStalePrimaryAllocationCommand(ShardId shardId, String node, boolean acceptDataLoss) {
super(shardId, node, acceptDataLoss);
}

@Override
public String name() {
return NAME;
}

public static class Builder extends BasePrimaryAllocationCommand.Builder<AllocateStalePrimaryAllocationCommand> {

@Override
public Builder parse(XContentParser parser) throws IOException {
return STALE_PRIMARY_PARSER.parse(parser, this);
}

@Override
public AllocateStalePrimaryAllocationCommand build() {
validate();
return new AllocateStalePrimaryAllocationCommand(new ShardId(index, shard), node, acceptDataLoss);
}
}

public static class Factory extends AbstractAllocateAllocationCommand.Factory<AllocateStalePrimaryAllocationCommand> {

@Override
protected Builder newBuilder() {
return new Builder();
}
}

@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
final DiscoveryNode discoNode;
try {
discoNode = allocation.nodes().resolveNode(node);
} catch (IllegalArgumentException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
final RoutingNodes routingNodes = allocation.routingNodes();
RoutingNode routingNode = routingNodes.node(discoNode.id());
if (routingNode == null) {
return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
}

final ShardRouting shardRouting;
try {
shardRouting = allocation.routingTable().shardRoutingTable(shardId).primaryShard();
} catch (IndexNotFoundException | ShardNotFoundException e) {
return explainOrThrowRejectedCommand(explain, allocation, e);
}
if (shardRouting.unassigned() == false) {
return explainOrThrowRejectedCommand(explain, allocation, "primary " + shardId + " is already assigned");
}

if (acceptDataLoss == false) {
return explainOrThrowRejectedCommand(explain, allocation,
"allocating an empty primary for " + shardId + " can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
}

final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
if (shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
return explainOrThrowRejectedCommand(explain, allocation,
"trying to allocate an existing primary shard " + shardId + ", while no such shard has ever been active");
}

initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting);
return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
}

}

@ -67,7 +67,9 @@ public class AllocationCommands {
}

static {
registerFactory(AllocateAllocationCommand.NAME, new AllocateAllocationCommand.Factory());
registerFactory(AllocateEmptyPrimaryAllocationCommand.NAME, new AllocateEmptyPrimaryAllocationCommand.Factory());
registerFactory(AllocateStalePrimaryAllocationCommand.NAME, new AllocateStalePrimaryAllocationCommand.Factory());
registerFactory(AllocateReplicaAllocationCommand.NAME, new AllocateReplicaAllocationCommand.Factory());
registerFactory(CancelAllocationCommand.NAME, new CancelAllocationCommand.Factory());
registerFactory(MoveAllocationCommand.NAME, new MoveAllocationCommand.Factory());
}

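With the registry above in place, the specialized commands can be constructed alongside the existing cancel and move commands; a sketch with placeholder index and node names:

ShardId shardId = new ShardId("my-index", 0);
AllocationCommand replica = new AllocateReplicaAllocationCommand(shardId, "node-2");
AllocationCommand emptyPrimary = new AllocateEmptyPrimaryAllocationCommand(shardId, "node-1", true);
AllocationCommand stalePrimary = new AllocateStalePrimaryAllocationCommand(shardId, "node-1", true);
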
@ -0,0 +1,88 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.routing.allocation.command;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;

/**
* Abstract base class for allocating an unassigned primary shard to a node
*/
public abstract class BasePrimaryAllocationCommand extends AbstractAllocateAllocationCommand {

private static final String ACCEPT_DATA_LOSS_KEY = "accept_data_loss";

protected static <T extends Builder> ObjectParser<T, Void> createAllocatePrimaryParser(String command) {
ObjectParser<T, Void> parser = AbstractAllocateAllocationCommand.createAllocateParser(command);
parser.declareBoolean(Builder::setAcceptDataLoss, new ParseField(ACCEPT_DATA_LOSS_KEY));
return parser;
}

protected final boolean acceptDataLoss;

protected BasePrimaryAllocationCommand(ShardId shardId, String node, boolean acceptDataLoss) {
super(shardId, node);
this.acceptDataLoss = acceptDataLoss;
}

/**
* The operation only executes if the user explicitly agrees to possible data loss
*
* @return whether data loss is acceptable
*/
public boolean acceptDataLoss() {
return acceptDataLoss;
}

protected static abstract class Builder<T extends BasePrimaryAllocationCommand> extends AbstractAllocateAllocationCommand.Builder<T> {
protected boolean acceptDataLoss;

public void setAcceptDataLoss(boolean acceptDataLoss) {
this.acceptDataLoss = acceptDataLoss;
}

@Override
public Builder readFrom(StreamInput in) throws IOException {
super.readFrom(in);
acceptDataLoss = in.readBoolean();
return this;
}
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
super.toXContent(builder, params);
builder.field(ACCEPT_DATA_LOSS_KEY, acceptDataLoss);
return builder;
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(acceptDataLoss);
}
}

@ -32,7 +32,7 @@ import java.util.Locale;

/**
* This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} /
* {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}.
* {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link #INDEX_ROUTING_REBALANCE_ENABLE_SETTING}.
* The per index setting overrides the cluster wide setting.
*
* <p>

@ -61,10 +61,10 @@ public class EnableAllocationDecider extends AllocationDecider {
public static final String NAME = "enable";

public static final Setting<Allocation> CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER);
public static final String INDEX_ROUTING_ALLOCATION_ENABLE = "index.routing.allocation.enable";
public static final Setting<Allocation> INDEX_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.INDEX);

public static final Setting<Rebalance> CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER);
public static final String INDEX_ROUTING_REBALANCE_ENABLE = "index.routing.rebalance.enable";
public static final Setting<Rebalance> INDEX_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.INDEX);

private volatile Rebalance enableRebalance;
private volatile Allocation enableAllocation;

@ -92,11 +92,10 @@ public class EnableAllocationDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored");
}

IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
String enableIndexValue = indexMetaData.getSettings().get(INDEX_ROUTING_ALLOCATION_ENABLE);
final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
final Allocation enable;
if (enableIndexValue != null) {
enable = Allocation.parse(enableIndexValue);
if (INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.exists(indexMetaData.getSettings())) {
enable = INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.get(indexMetaData.getSettings());
} else {
enable = this.enableAllocation;
}

@ -129,10 +128,9 @@ public class EnableAllocationDecider extends AllocationDecider {
}

Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings();
String enableIndexValue = indexSettings.get(INDEX_ROUTING_REBALANCE_ENABLE);
final Rebalance enable;
if (enableIndexValue != null) {
enable = Rebalance.parse(enableIndexValue);
if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) {
enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings);
} else {
enable = this.enableRebalance;
}

@ -160,7 +158,7 @@ public class EnableAllocationDecider extends AllocationDecider {

/**
* Allocation values or rather their string representation to be used with
* {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE}
* {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE_SETTING}
* via cluster / index settings.
*/
public enum Allocation {

@ -186,7 +184,7 @@ public class EnableAllocationDecider extends AllocationDecider {

/**
* Rebalance values or rather their string representation to be used with
* {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE}
* {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE_SETTING}
* via cluster / index settings.
*/
public enum Rebalance {

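The exists()/get() pair above is the typed replacement for the raw string lookup; a sketch of the fallback order, assuming indexSettings and the decider's cluster-level default are in scope:

final Allocation enable;
if (EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.exists(indexSettings)) {
    // per-index value wins, e.g. index.routing.allocation.enable: none
    enable = EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.get(indexSettings);
} else {
    // otherwise fall back to cluster.routing.allocation.enable
    enable = enableAllocation;
}
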
@ -60,10 +60,6 @@ public class FilterAllocationDecider extends AllocationDecider {

public static final String NAME = "filter";

public static final String INDEX_ROUTING_REQUIRE_GROUP = "index.routing.allocation.require.";
public static final String INDEX_ROUTING_INCLUDE_GROUP = "index.routing.allocation.include.";
public static final String INDEX_ROUTING_EXCLUDE_GROUP = "index.routing.allocation.exclude.";

public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER);
public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER);
public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER);

@ -32,12 +32,12 @@ import org.elasticsearch.common.settings.Settings;
/**
* This {@link AllocationDecider} limits the number of shards per node on a per
* index or node-wide basis. The allocator prevents a single node from holding more
* than {@value #INDEX_TOTAL_SHARDS_PER_NODE} per index and
* than <tt>index.routing.allocation.total_shards_per_node</tt> per index and
* <tt>cluster.routing.allocation.total_shards_per_node</tt> globally during the allocation
* process. The limits of this decider can be changed in real-time via the
* index settings API.
* <p>
* If {@value #INDEX_TOTAL_SHARDS_PER_NODE} is reset to a negative value shards
* If <tt>index.routing.allocation.total_shards_per_node</tt> is reset to a negative value shards
* per index are unlimited per node. Shards currently in the
* {@link ShardRoutingState#RELOCATING relocating} state are ignored by this
* {@link AllocationDecider} until the shard changes its state to either

@ -59,12 +59,13 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
* Controls the maximum number of shards per index on a single Elasticsearch
* node. Negative values are interpreted as unlimited.
*/
public static final String INDEX_TOTAL_SHARDS_PER_NODE = "index.routing.allocation.total_shards_per_node";
public static final Setting<Integer> INDEX_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.INDEX);

/**
* Controls the maximum number of shards per node on a global level.
* Negative values are interpreted as unlimited.
*/
public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, true, Setting.Scope.CLUSTER);
public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.CLUSTER);


@Inject

@ -81,7 +82,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
int indexShardLimit = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
// Capture the limit here in case it changes during this method's
// execution
final int clusterShardLimit = this.clusterShardLimit;

@ -118,7 +119,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
@Override
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
int indexShardLimit = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
// Capture the limit here in case it changes during this method's
// execution
final int clusterShardLimit = this.clusterShardLimit;

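The two-argument get(indexSettings, settings) used above is what layers the index-level setting over the node-level one before the declared default applies; a small sketch using empty settings:

// no index-level or node-level value set: the declared default of -1 (unlimited) applies
int limit = ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING
    .get(Settings.EMPTY, Settings.EMPTY); // == -1
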
@ -630,7 +630,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
}

executor.clusterStatePublished(newClusterState);
try {
executor.clusterStatePublished(newClusterState);
} catch (Exception e) {
logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, source);
}

TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());

@ -1,74 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.settings;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.regex.Regex;

/**
* A container for setting names and validation methods for those settings.
*/
public class DynamicSettings {
private final ImmutableOpenMap<String, Validator> dynamicSettings;

public static class Builder {
private ImmutableOpenMap.Builder<String, Validator> settings = ImmutableOpenMap.builder();

public void addSetting(String setting, Validator validator) {
Validator old = settings.put(setting, validator);
if (old != null) {
throw new IllegalArgumentException("Cannot register setting [" + setting + "] twice");
}
}

public DynamicSettings build() {
return new DynamicSettings(settings.build());
}
}

private DynamicSettings(ImmutableOpenMap<String, Validator> settings) {
this.dynamicSettings = settings;
}

public boolean isDynamicOrLoggingSetting(String key) {
return hasDynamicSetting(key) || key.startsWith("logger.");
}

public boolean hasDynamicSetting(String key) {
for (ObjectCursor<String> dynamicSetting : dynamicSettings.keys()) {
if (Regex.simpleMatch(dynamicSetting.value, key)) {
return true;
}
}
return false;
}

public String validateDynamicSetting(String dynamicSetting, String value, ClusterState clusterState) {
for (ObjectObjectCursor<String, Validator> setting : dynamicSettings) {
if (Regex.simpleMatch(setting.key, dynamicSetting)) {
return setting.value.validate(dynamicSetting, value, clusterState);
}
}
return null;
}
}

@ -1,307 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.settings;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue;
import static org.elasticsearch.common.unit.MemorySizeValue.parseBytesSizeValueOrHeapRatio;


/**
* Validates a setting, returning a failure message if applicable.
*/
public interface Validator {

String validate(String setting, String value, ClusterState clusterState);

Validator EMPTY = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
return null;
}
};

Validator TIME = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
if (value == null) {
throw new NullPointerException("value must not be null");
}
try {
// This never returns null:
TimeValue.parseTimeValue(value, null, setting);
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

Validator TIMEOUT = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (value == null) {
throw new NullPointerException("value must not be null");
}
TimeValue timeValue = TimeValue.parseTimeValue(value, null, setting);
assert timeValue != null;
if (timeValue.millis() < 0 && timeValue.millis() != -1) {
return "cannot parse value [" + value + "] as a timeout";
}
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

Validator TIME_NON_NEGATIVE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (value == null) {
throw new NullPointerException("value must not be null");
}
TimeValue timeValue = TimeValue.parseTimeValue(value, null, setting);
assert timeValue != null;
if (timeValue.millis() < 0) {
return "cannot parse value [" + value + "] as non negative time";
}
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

Validator FLOAT = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
Float.parseFloat(value);
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a float";
}
return null;
}
};

Validator NON_NEGATIVE_FLOAT = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Float.parseFloat(value) < 0.0) {
return "the value of the setting " + setting + " must be a non negative float";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a float";
}
return null;
}
};

Validator DOUBLE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
Double.parseDouble(value);
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
};

Validator NON_NEGATIVE_DOUBLE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Double.parseDouble(value) < 0.0) {
return "the value of the setting " + setting + " must be a non negative double";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
};

Validator DOUBLE_GTE_2 = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Double.parseDouble(value) < 2.0) {
return "the value of the setting " + setting + " must be >= 2.0";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
};

Validator INTEGER = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
Integer.parseInt(value);
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};

Validator POSITIVE_INTEGER = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Integer.parseInt(value) <= 0) {
return "the value of the setting " + setting + " must be a positive integer";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};

Validator NON_NEGATIVE_INTEGER = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Integer.parseInt(value) < 0) {
return "the value of the setting " + setting + " must be a non negative integer";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};

Validator INTEGER_GTE_2 = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Integer.parseInt(value) < 2) {
return "the value of the setting " + setting + " must be >= 2";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};

Validator BYTES_SIZE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
parseBytesSizeValue(value, setting);
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

Validator POSITIVE_BYTES_SIZE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState state) {
try {
ByteSizeValue byteSizeValue = parseBytesSizeValue(value, setting);
if (byteSizeValue.getBytes() <= 0) {
return setting + " must be a positive byte size value";
}
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

Validator PERCENTAGE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (value == null) {
return "the value of " + setting + " can not be null";
}
if (!value.endsWith("%")) {
return "the value [" + value + "] for " + setting + " must end with %";
}
final double asDouble = Double.parseDouble(value.substring(0, value.length() - 1));
if (asDouble < 0.0 || asDouble > 100.0) {
return "the value [" + value + "] for " + setting + " must be a percentage between 0% and 100%";
}
} catch (NumberFormatException ex) {
return ex.getMessage();
}
return null;
}
};


Validator BYTES_SIZE_OR_PERCENTAGE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
String byteSize = BYTES_SIZE.validate(setting, value, clusterState);
if (byteSize != null) {
String percentage = PERCENTAGE.validate(setting, value, clusterState);
if (percentage == null) {
return null;
}
return percentage + " or be a valid bytes size value, like [16mb]";
}
return null;
}
};


Validator MEMORY_SIZE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
parseBytesSizeValueOrHeapRatio(value, setting);
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

public static final Validator BOOLEAN = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {

if (value != null && (Booleans.isExplicitFalse(value) || Booleans.isExplicitTrue(value))) {
return null;
}
return "cannot parse value [" + value + "] as a boolean";
}
};
}

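For contrast with the string-based Validator removed above, the typed Setting infrastructure used elsewhere in this change performs the same checks at parse time; the setting key here is a made-up example:

// old style: validation returns an error message string, or null when valid
String error = Validator.NON_NEGATIVE_INTEGER.validate("example.threads", "-3", null);

// new style: type and bounds are part of the Setting declaration itself
Setting<Integer> threads = Setting.intSetting("example.threads", 0, 0, true, Setting.Scope.CLUSTER);
int value = threads.get(Settings.EMPTY); // falls back to the declared default (0)
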
@ -336,7 +336,7 @@ public class Cache<K, V> {
* value using the given mapping function and enters it into this map unless null. The load method for a given key
* will be invoked at most once.
*
* @param key the key whose associated value is to be returned or computed for if non-existant
* @param key the key whose associated value is to be returned or computed for if non-existent
* @param loader the function to compute a value given a key
* @return the current (existing or computed) value associated with the specified key, or null if the computed
* value is null

@@ -36,7 +36,7 @@ public class CircleBuilder extends ShapeBuilder {
    public static final String FIELD_RADIUS = "radius";
    public static final GeoShapeType TYPE = GeoShapeType.CIRCLE;

    static final CircleBuilder PROTOTYPE = new CircleBuilder();
    public static final CircleBuilder PROTOTYPE = new CircleBuilder();

    private DistanceUnit unit = DistanceUnit.DEFAULT;
    private double radius;

@@ -33,7 +33,7 @@ public class EnvelopeBuilder extends ShapeBuilder {

    public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE;

    static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(new Coordinate(-1.0, 1.0), new Coordinate(1.0, -1.0));
    public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(new Coordinate(-1.0, 1.0), new Coordinate(1.0, -1.0));

    private Coordinate topLeft;
    private Coordinate bottomRight;

@@ -36,7 +36,7 @@ public class GeometryCollectionBuilder extends ShapeBuilder {

    public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION;

    static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder();
    public static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder();

    protected final ArrayList<ShapeBuilder> shapes = new ArrayList<>();

@@ -57,7 +57,7 @@ public class LineStringBuilder extends CoordinateCollection<LineStringBuilder> {

    public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;

    static final LineStringBuilder PROTOTYPE = new LineStringBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(1.0, 1.0));
    public static final LineStringBuilder PROTOTYPE = new LineStringBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(1.0, 1.0));

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {

@@ -37,7 +37,7 @@ public class MultiLineStringBuilder extends ShapeBuilder {

    public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING;

    static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder();
    public static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder();

    private final ArrayList<LineStringBuilder> lines = new ArrayList<>();

@@ -37,7 +37,7 @@ public class MultiPointBuilder extends CoordinateCollection<MultiPointBuilder> {

    public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT;

    final static MultiPointBuilder PROTOTYPE = new MultiPointBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).build());
    public static final MultiPointBuilder PROTOTYPE = new MultiPointBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).build());

    /**
     * Create a new {@link MultiPointBuilder}.

@@ -36,7 +36,7 @@ import java.util.Objects;
public class MultiPolygonBuilder extends ShapeBuilder {

    public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON;
    static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder();
    public static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder();

    private final ArrayList<PolygonBuilder> polygons = new ArrayList<>();

@@ -32,7 +32,7 @@ import java.util.Objects;
public class PointBuilder extends ShapeBuilder {

    public static final GeoShapeType TYPE = GeoShapeType.POINT;
    static final PointBuilder PROTOTYPE = new PointBuilder();
    public static final PointBuilder PROTOTYPE = new PointBuilder();

    private Coordinate coordinate;

@@ -53,7 +53,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
public class PolygonBuilder extends ShapeBuilder {

    public static final GeoShapeType TYPE = GeoShapeType.POLYGON;
    static final PolygonBuilder PROTOTYPE = new PolygonBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(0.0, 1.0)
    public static final PolygonBuilder PROTOTYPE = new PolygonBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(0.0, 1.0)
            .coordinate(1.0, 0.0).coordinate(0.0, 0.0));

    private static final Coordinate[][] EMPTY = new Coordinate[0][];

@@ -1,45 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.geo.builders;

import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;

/**
 * Register the shape builder prototypes with the {@link NamedWriteableRegistry}
 */
public class ShapeBuilderRegistry {

    @Inject
    public ShapeBuilderRegistry(NamedWriteableRegistry namedWriteableRegistry) {
        if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE);
            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE);
            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE);
            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE);
            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE);
            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE);
            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE);
            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE);
            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE);
        }
    }
}
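The PROTOTYPE constants above go from package-private to public because this registry class is deleted, presumably so the prototypes can now be registered from outside the package. A minimal sketch of the prototype pattern it used (the registration site is an assumption):

    NamedWriteableRegistry registry = new NamedWriteableRegistry();
    registry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE);
    // a serialized ShapeBuilder is later resolved by name against the registered
    // prototype and reconstructed via StreamInput#readNamedWriteable(ShapeBuilder.class)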
@@ -49,8 +49,8 @@ public abstract class AbstractMatcher<T> implements Matcher<T> {
        @Override
        public boolean equals(Object other) {
            return other instanceof AndMatcher
                && ((AndMatcher) other).a.equals(a)
                && ((AndMatcher) other).b.equals(b);
                && ((AndMatcher<?>) other).a.equals(a)
                && ((AndMatcher<?>) other).b.equals(b);
        }

        @Override

@@ -80,8 +80,8 @@ public abstract class AbstractMatcher<T> implements Matcher<T> {
        @Override
        public boolean equals(Object other) {
            return other instanceof OrMatcher
                && ((OrMatcher) other).a.equals(a)
                && ((OrMatcher) other).b.equals(b);
                && ((OrMatcher<?>) other).a.equals(a)
                && ((OrMatcher<?>) other).b.equals(b);
        }

        @Override
@@ -37,6 +37,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder.Rescorer;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
@@ -676,6 +677,13 @@ public abstract class StreamInput extends InputStream {
        return readNamedWriteable(ShapeBuilder.class);
    }

    /**
     * Reads a {@link Rescorer} from the current stream
     */
    public Rescorer readRescorer() throws IOException {
        return readNamedWriteable(Rescorer.class);
    }

    /**
     * Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream
     */
@@ -36,13 +36,13 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder.Rescorer;
import org.joda.time.ReadableInstant;

import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.ClosedChannelException;
import java.nio.file.AccessDeniedException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.DirectoryNotEmptyException;

@@ -676,5 +676,12 @@ public abstract class StreamOutput extends OutputStream {
        for (T obj: list) {
            obj.writeTo(this);
        }
    }

    /**
     * Writes a {@link Rescorer} to the current stream
     */
    public void writeRescorer(Rescorer rescorer) throws IOException {
        writeNamedWriteable(rescorer);
    }
}
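A round-trip sketch for the new rescorer stream methods (not part of the commit; BytesStreamOutput and NamedWriteableAwareStreamInput are assumptions about this codebase's test idiom, and rescorer/registry are assumed to be in scope):

    BytesStreamOutput out = new BytesStreamOutput();
    out.writeRescorer(rescorer); // delegates to writeNamedWriteable: name plus payload
    StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(out.bytes().toBytes()), registry);
    Rescorer copy = in.readRescorer(); // resolves the prototype by name from the registry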
@@ -20,9 +20,11 @@
package org.elasticsearch.common.lucene;

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReader.CoreClosedListener;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;

@@ -72,7 +74,7 @@ public final class ShardCoreKeyMap {
        }
        final boolean added = objects.add(coreKey);
        assert added;
        reader.addCoreClosedListener(ownerCoreCacheKey -> {
        CoreClosedListener listener = ownerCoreCacheKey -> {
            assert coreKey == ownerCoreCacheKey;
            synchronized (ShardCoreKeyMap.this) {
                coreKeyToShard.remove(ownerCoreCacheKey);

@@ -83,7 +85,20 @@ public final class ShardCoreKeyMap {
                    indexToCoreKey.remove(index);
                }
            }
        });
        };
        boolean addedListener = false;
        try {
            reader.addCoreClosedListener(listener);
            addedListener = true;
        } finally {
            if (false == addedListener) {
                try {
                    listener.onClose(coreKey);
                } catch (IOException e) {
                    throw new RuntimeException("Blow up trying to recover from failure to add listener", e);
                }
            }
        }
    }
}
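The new try/finally undoes the map bookkeeping if addCoreClosedListener itself throws; distilled, the register-or-roll-back idiom looks like this (hypothetical names, not part of the commit):

    boolean registered = false;
    try {
        resource.addListener(listener);
        registered = true;
    } finally {
        if (registered == false) {
            // the callback will never fire, so run the cleanup ourselves
            listener.onClose(key);
        }
    }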
@@ -117,6 +117,12 @@ public class Queries {
        if (minimumShouldMatch == null) {
            return query;
        }
        // Queries with a single word expanded with synonyms
        // have their coordination factor disabled (@see org.apache.lucene.util.QueryBuilder#analyzeBoolean()).
        // minimumShouldMatch should not be applicable in that case.
        if (query.isCoordDisabled()) {
            return query;
        }
        int optionalClauses = 0;
        for (BooleanClause c : query.clauses()) {
            if (c.getOccur() == BooleanClause.Occur.SHOULD) {
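A sketch of the case this guards against (assumed Lucene 5.x API, not part of the commit): a single analyzed term expanded into synonyms becomes a coord-disabled BooleanQuery of SHOULD clauses, and applying minimum_should_match there would wrongly require several synonyms of the same word to match at once.

    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    builder.setDisableCoord(true); // what QueryBuilder#analyzeBoolean produces for synonyms
    builder.add(new TermQuery(new Term("body", "fast")), BooleanClause.Occur.SHOULD);
    builder.add(new TermQuery(new Term("body", "quick")), BooleanClause.Occur.SHOULD);
    // applyMinimumShouldMatch(builder.build(), "2") now returns the query unchanged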
@@ -19,6 +19,9 @@

package org.elasticsearch.common.network;

import java.util.Arrays;
import java.util.List;

import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.client.transport.support.TransportProxyClient;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -134,9 +137,6 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import org.elasticsearch.transport.netty.NettyTransport;

import java.util.Arrays;
import java.util.List;

/**
 * A module to handle registering and binding all network related classes.
 */
@@ -290,6 +290,7 @@ public class NetworkModule extends AbstractModule {
    private final ExtensionPoint.ClassSet<RestHandler> restHandlers = new ExtensionPoint.ClassSet<>("rest_handler", RestHandler.class);
    // we must separate the cat rest handlers so RestCatAction can collect them...
    private final ExtensionPoint.ClassSet<AbstractCatAction> catHandlers = new ExtensionPoint.ClassSet<>("cat_handler", AbstractCatAction.class);
    private final NamedWriteableRegistry namedWriteableRegistry;

    /**
     * Creates a network module that custom networking classes can be plugged into.

@@ -297,11 +298,13 @@ public class NetworkModule extends AbstractModule {
     * @param networkService A constructed network service object to bind.
     * @param settings The settings for the node
     * @param transportClient True if only transport classes should be allowed to be registered, false otherwise.
     * @param namedWriteableRegistry registry for named writeables for use during streaming
     */
    public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient) {
    public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient, NamedWriteableRegistry namedWriteableRegistry) {
        this.networkService = networkService;
        this.settings = settings;
        this.transportClient = transportClient;
        this.namedWriteableRegistry = namedWriteableRegistry;
        registerTransportService(NETTY_TRANSPORT, TransportService.class);
        registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
        registerTransport(NETTY_TRANSPORT, NettyTransport.class);

@@ -353,7 +356,7 @@ public class NetworkModule extends AbstractModule {
    @Override
    protected void configure() {
        bind(NetworkService.class).toInstance(networkService);
        bind(NamedWriteableRegistry.class).asEagerSingleton();
        bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);

        transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, NETTY_TRANSPORT);
        String defaultTransport = DiscoveryNode.localNode(settings) ? LOCAL_TRANSPORT : NETTY_TRANSPORT;
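A construction sketch for the new signature (not part of the commit; the networkService and settings values are assumed to come from node bootstrap):

    NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
    NetworkModule networkModule = new NetworkModule(networkService, settings, false, namedWriteableRegistry);
    // binding an externally created instance via toInstance(...) instead of
    // asEagerSingleton() lets code outside the injector share the same registry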
@@ -21,9 +21,13 @@ package org.elasticsearch.common.settings;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.util.set.Sets;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

@@ -36,24 +40,38 @@ import java.util.function.Consumer;
 */
public abstract class AbstractScopedSettings extends AbstractComponent {
    private Settings lastSettingsApplied = Settings.EMPTY;
    private final List<SettingUpdater> settingUpdaters = new ArrayList<>();
    private final List<SettingUpdater<?>> settingUpdaters = new ArrayList<>();
    private final Map<String, Setting<?>> complexMatchers = new HashMap<>();
    private final Map<String, Setting<?>> keySettings = new HashMap<>();
    private final Setting.Scope scope;

    protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet, Setting.Scope scope) {
        super(settings);
        for (Setting<?> entry : settingsSet) {
            if (entry.getScope() != scope) {
                throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope());
            }
            if (entry.hasComplexMatcher()) {
                complexMatchers.put(entry.getKey(), entry);
            } else {
                keySettings.put(entry.getKey(), entry);
            }
        }
        this.lastSettingsApplied = Settings.EMPTY;
        this.scope = scope;
        for (Setting<?> entry : settingsSet) {
            addSetting(entry);
        }
    }

    protected AbstractScopedSettings(Settings nodeSettings, Settings scopeSettings, AbstractScopedSettings other) {
        super(nodeSettings);
        this.lastSettingsApplied = scopeSettings;
        this.scope = other.scope;
        complexMatchers.putAll(other.complexMatchers);
        keySettings.putAll(other.keySettings);
        settingUpdaters.addAll(other.settingUpdaters);
    }

    protected final void addSetting(Setting<?> setting) {
        if (setting.getScope() != scope) {
            throw new IllegalArgumentException("Setting must be a " + scope + " setting but was: " + setting.getScope());
        }
        if (setting.hasComplexMatcher()) {
            complexMatchers.putIfAbsent(setting.getKey(), setting);
        } else {
            keySettings.putIfAbsent(setting.getKey(), setting);
        }
    }

    public Setting.Scope getScope() {
@@ -68,7 +86,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
        final Settings current = Settings.builder().put(this.settings).put(settings).build();
        final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
        List<RuntimeException> exceptions = new ArrayList<>();
        for (SettingUpdater settingUpdater : settingUpdaters) {
        for (SettingUpdater<?> settingUpdater : settingUpdaters) {
            try {
                if (settingUpdater.hasChanged(current, previous)) {
                    settingUpdater.getValue(current, previous);

@@ -99,7 +117,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
        final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
        try {
            List<Runnable> applyRunnables = new ArrayList<>();
            for (SettingUpdater settingUpdater : settingUpdaters) {
            for (SettingUpdater<?> settingUpdater : settingUpdaters) {
                try {
                    applyRunnables.add(settingUpdater.updater(current, previous));
                } catch (Exception ex) {
@@ -161,9 +179,38 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
        addSettingsUpdateConsumer(setting, consumer, (s) -> {});
    }

    /**
     * Validates that all settings in the builder are registered and valid
     */
    public final void validate(Settings.Builder settingsBuilder) {
        validate(settingsBuilder.build());
    }

    /**
     * Validates that all given settings are registered and valid
     */
    public final void validate(Settings settings) {
        for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
            validate(entry.getKey(), settings);
        }
    }


    /**
     * Validates that the setting is valid
     */
    public final void validate(String key, Settings settings) {
        Setting setting = get(key);
        if (setting == null) {
            throw new IllegalArgumentException("unknown setting [" + key + "]");
        }
        setting.get(settings);
    }

    /**
     * Transactional interface to update settings.
     * @see Setting
     * @param <T> the type of the value of the setting
     */
    public interface SettingUpdater<T> {

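A usage sketch for the new validation hooks (not part of the commit; assumes the allocation-enable setting is registered, as elsewhere in this change):

    ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    clusterSettings.validate(Settings.builder().put("cluster.routing.allocation.enable", "none").build()); // passes
    clusterSettings.validate(Settings.builder().put("cluster.routing.allocation.enabled", "none").build());
    // throws IllegalArgumentException: unknown setting [cluster.routing.allocation.enabled]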
@@ -216,17 +263,16 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
    /**
     * Returns the {@link Setting} for the given key or <code>null</code> if the setting can not be found.
     */
    public Setting get(String key) {
    public Setting<?> get(String key) {
        Setting<?> setting = keySettings.get(key);
        if (setting == null) {
            for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
                if (entry.getValue().match(key)) {
                    return entry.getValue();
                }
            }
        } else {
        if (setting != null) {
            return setting;
        }
        for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
            if (entry.getValue().match(key)) {
                return entry.getValue();
            }
        }
        return null;
    }

@@ -234,7 +280,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
     * Returns <code>true</code> if the setting for the given key is dynamically updateable. Otherwise <code>false</code>.
     */
    public boolean hasDynamicSetting(String key) {
        final Setting setting = get(key);
        final Setting<?> setting = get(key);
        return setting != null && setting.isDynamic();
    }

@@ -253,4 +299,93 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
        return builder.build();
    }

    /**
     * Returns the value for the given setting.
     */
    public <T> T get(Setting<T> setting) {
        if (setting.getScope() != scope) {
            throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] != [" + setting.getScope() + "]");
        }
        if (get(setting.getKey()) == null) {
            throw new IllegalArgumentException("setting " + setting.getKey() + " has not been registered");
        }
        return setting.get(this.lastSettingsApplied, settings);
    }

    /**
     * Updates a target settings builder with new, updated or deleted settings from a given settings builder.
     * <p>
     * Note: This method will only allow updates to dynamic settings. If a non-dynamic setting is updated an {@link IllegalArgumentException} is thrown instead.
     * </p>
     * @param toApply the new settings to apply
     * @param target the target settings builder that the updates are applied to. All keys that have explicit null value in toApply will be removed from this builder
     * @param updates a settings builder that holds all updates applied to target
     * @param type a free text string to allow better exception messages
     * @return <code>true</code> if the target has changed otherwise <code>false</code>
     */
    public boolean updateDynamicSettings(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
        return updateSettings(toApply, target, updates, type, true);
    }

    /**
     * Updates a target settings builder with new, updated or deleted settings from a given settings builder.
     * @param toApply the new settings to apply
     * @param target the target settings builder that the updates are applied to. All keys that have explicit null value in toApply will be removed from this builder
     * @param updates a settings builder that holds all updates applied to target
     * @param type a free text string to allow better exception messages
     * @return <code>true</code> if the target has changed otherwise <code>false</code>
     */
    public boolean updateSettings(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
        return updateSettings(toApply, target, updates, type, false);
    }

    /**
     * Updates a target settings builder with new, updated or deleted settings from a given settings builder.
     * @param toApply the new settings to apply
     * @param target the target settings builder that the updates are applied to. All keys that have explicit null value in toApply will be removed from this builder
     * @param updates a settings builder that holds all updates applied to target
     * @param type a free text string to allow better exception messages
     * @param onlyDynamic if <code>false</code> all settings are updated otherwise only dynamic settings are updated. if set to <code>true</code> and a non-dynamic setting is updated an exception is thrown.
     * @return <code>true</code> if the target has changed otherwise <code>false</code>
     */
    private boolean updateSettings(Settings toApply, Settings.Builder target, Settings.Builder updates, String type, boolean onlyDynamic) {
        boolean changed = false;
        final Set<String> toRemove = new HashSet<>();
        Settings.Builder settingsBuilder = Settings.settingsBuilder();
        for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
            if (entry.getValue() == null) {
                toRemove.add(entry.getKey());
            } else if ((onlyDynamic == false && get(entry.getKey()) != null) || hasDynamicSetting(entry.getKey())) {
                validate(entry.getKey(), toApply);
                settingsBuilder.put(entry.getKey(), entry.getValue());
                updates.put(entry.getKey(), entry.getValue());
                changed = true;
            } else {
                throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
            }

        }
        changed |= applyDeletes(toRemove, target);
        target.put(settingsBuilder.build());
        return changed;
    }

    private static final boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
        boolean changed = false;
        for (String entry : deletes) {
            Set<String> keysToRemove = new HashSet<>();
            Set<String> keySet = builder.internalMap().keySet();
            for (String key : keySet) {
                if (Regex.simpleMatch(entry, key)) {
                    keysToRemove.add(key);
                }
            }
            for (String key : keysToRemove) {
                builder.remove(key);
                changed = true;
            }
        }
        return changed;
    }

}
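A sketch of how updateDynamicSettings is meant to be driven (not part of the commit; builder contents are hypothetical):

    Settings.Builder target = Settings.builder().put(currentClusterSettings); // mutated in place
    Settings.Builder updates = Settings.builder();                           // records every applied change
    boolean changed = clusterSettings.updateDynamicSettings(
            Settings.builder().put("cluster.routing.allocation.enable", "none").build(),
            target, updates, "cluster");
    // a non-dynamic key in toApply throws IllegalArgumentException; a key with an explicit
    // null value is deleted from target, with simple wildcard support via applyDeletes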
@@ -38,6 +38,8 @@ import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.recovery.RecoverySettings;

@@ -62,7 +64,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
        super(settings, settingsSet, Setting.Scope.CLUSTER);
    }


    @Override
    public synchronized Settings applySettings(Settings newSettings) {
        Settings settings = super.applySettings(newSettings);

@@ -83,6 +84,11 @@ public final class ClusterSettings extends AbstractScopedSettings {
        return settings;
    }

    @Override
    public boolean hasDynamicSetting(String key) {
        return isLoggerSetting(key) || super.hasDynamicSetting(key);
    }

    /**
     * Returns <code>true</code> if the setting is a logger setting.
     */

@@ -149,5 +155,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
        HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
        HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
        Transport.TRANSPORT_PROFILES_SETTING,
        Transport.TRANSPORT_TCP_COMPRESS)));
        Transport.TRANSPORT_TCP_COMPRESS,
        IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
        IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
        PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING)));
}
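A small sketch of the hasDynamicSetting override (not part of the commit; assumes isLoggerSetting matches the logger. key prefix):

    clusterSettings.hasDynamicSetting("logger.indices.recovery"); // true: logger levels stay updatable
    clusterSettings.hasDynamicSetting("node.name");               // false: not a registered dynamic setting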
@@ -0,0 +1,155 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.common.settings;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexingSlowLog;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.SearchSlowLog;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.search.SearchService;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;

/**
 * Encapsulates all valid index level settings.
 * @see org.elasticsearch.common.settings.Setting.Scope#INDEX
 */
public final class IndexScopedSettings extends AbstractScopedSettings {

    public static final Predicate<String> INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetaData.INDEX_SETTING_PREFIX);

    public static Set<Setting<?>> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
        IndexSettings.INDEX_TTL_DISABLE_PURGE_SETTING,
        IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING,
        IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
        MergeSchedulerConfig.AUTO_THROTTLE_SETTING,
        MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING,
        MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING,
        IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING,
        IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING,
        IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
        IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING,
        IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING,
        IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING,
        IndexMetaData.INDEX_SHADOW_REPLICAS_SETTING,
        IndexMetaData.INDEX_SHARED_FILESYSTEM_SETTING,
        IndexMetaData.INDEX_READ_ONLY_SETTING,
        IndexMetaData.INDEX_BLOCKS_READ_SETTING,
        IndexMetaData.INDEX_BLOCKS_WRITE_SETTING,
        IndexMetaData.INDEX_BLOCKS_METADATA_SETTING,
        IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING,
        IndexMetaData.INDEX_PRIORITY_SETTING,
        IndexMetaData.INDEX_DATA_PATH_SETTING,
        SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,
        SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING,
        SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING,
        SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING,
        SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING,
        SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING,
        SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING,
        SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING,
        SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL,
        SearchSlowLog.INDEX_SEARCH_SLOWLOG_REFORMAT,
        IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING,
        IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING,
        IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING,
        IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING,
        IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING,
        IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING,
        IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING,
        MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING,
        MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING,
        MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING,
        MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING,
        MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING,
        MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING,
        MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING,
        MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING,
        IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING,
        IndexSettings.INDEX_WARMER_ENABLED_SETTING,
        IndexSettings.INDEX_REFRESH_INTERVAL_SETTING,
        IndexSettings.MAX_RESULT_WINDOW_SETTING,
        IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING,
        IndexSettings.DEFAULT_FIELD_SETTING,
        IndexSettings.QUERY_STRING_LENIENT_SETTING,
        IndexSettings.ALLOW_UNMAPPED,
        IndexSettings.INDEX_CHECK_ON_STARTUP,
        ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING,
        IndexSettings.INDEX_GC_DELETES_SETTING,
        IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING,
        UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING,
        EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING,
        EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING,
        IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTTING,
        IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY,
        FieldMapper.IGNORE_MALFORMED_SETTING,
        FieldMapper.COERCE_SETTING,
        Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING,
        PercolatorQueriesRegistry.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING,
        MapperService.INDEX_MAPPER_DYNAMIC_SETTING,
        MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING,
        BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING,
        IndexModule.INDEX_STORE_TYPE_SETTING,
        IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING,
        IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING,
        PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING,
        FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
        EngineConfig.INDEX_CODEC_SETTING,
        SearchService.INDEX_NORMS_LOADING_SETTING,
        // this sucks but we can't really validate all the analyzers/similarity in here
        Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX), // this allows similarity settings to be passed
        Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed

    )));

    public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);

    public IndexScopedSettings(Settings settings, Set<Setting<?>> settingsSet) {
        super(settings, settingsSet, Setting.Scope.INDEX);
    }

    private IndexScopedSettings(Settings settings, IndexScopedSettings other, IndexMetaData metaData) {
        super(settings, metaData.getSettings(), other);
    }

    public IndexScopedSettings copy(Settings settings, IndexMetaData metaData) {
        return new IndexScopedSettings(settings, this, metaData);
    }
}
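A sketch of the intended per-index usage (not part of the commit; the nodeSettings and indexMetaData values are assumed):

    IndexScopedSettings forIndex = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.copy(nodeSettings, indexMetaData);
    // the copy shares the registered settings and updaters, but reads and validates
    // values against this index's own settings (metaData.getSettings()) first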
@@ -36,12 +36,15 @@ import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.regex.Pattern;

/**
 * A setting. Encapsulates typical stuff like default value, parsing, and scope.
 * Some (dynamic=true) can be modified at run time using the API.
 */
public class Setting<T> extends ToXContentToBytes {
    private final String key;
@@ -165,6 +168,16 @@ public class Setting<T> extends ToXContentToBytes {
        return builder;
    }

    /**
     * Returns the value for this setting but falls back to the second provided settings object
     */
    public final T get(Settings primary, Settings secondary) {
        if (exists(primary)) {
            return get(primary);
        }
        return get(secondary);
    }

    /**
     * The settings scope - settings can either be cluster settings or per index settings.
     */
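A sketch of the two-level lookup this enables (not part of the commit; the key is hypothetical and the settings objects are assumed):

    Setting<Integer> mySetting = Setting.intSetting("index.my_plugin.slots", 5, true, Setting.Scope.INDEX);
    Integer value = mySetting.get(indexSettings, nodeSettings);
    // uses indexSettings if the key exists there, otherwise falls back to nodeSettings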
@@ -173,11 +186,18 @@ public class Setting<T> extends ToXContentToBytes {
        INDEX;
    }

    final AbstractScopedSettings.SettingUpdater newUpdater(Consumer<T> consumer, ESLogger logger) {
    /**
     * Build a new updater with a noop validator.
     */
    final AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger) {
        return newUpdater(consumer, logger, (s) -> {});
    }

    AbstractScopedSettings.SettingUpdater newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
    /**
     * Build the updater responsible for validating new values, logging the new
     * value, and eventually setting the value where it belongs.
     */
    AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
        if (isDynamic()) {
            return new Updater(consumer, logger, validator);
        } else {
@@ -216,7 +236,7 @@ public class Setting<T> extends ToXContentToBytes {
    }


    private class Updater implements AbstractScopedSettings.SettingUpdater<T> {
    private final class Updater implements AbstractScopedSettings.SettingUpdater<T> {
        private final Consumer<T> consumer;
        private final ESLogger logger;
        private final Consumer<T> accept;

@@ -256,8 +276,8 @@ public class Setting<T> extends ToXContentToBytes {
        }

        @Override
        public void apply(T value, Settings current, Settings previous) {
            logger.info("update [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current));
        public final void apply(T value, Settings current, Settings previous) {
            logger.info("updating [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current));
            consumer.accept(value);
        }
    }
@@ -285,6 +305,10 @@ public class Setting<T> extends ToXContentToBytes {
        return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope);
    }

    public static Setting<Long> longSetting(String key, long defaultValue, long minValue, boolean dynamic, Scope scope) {
        return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), dynamic, scope);
    }

    public static int parseInt(String s, int minValue, String key) {
        int value = Integer.parseInt(s);
        if (value < minValue) {
@@ -293,6 +317,14 @@ public class Setting<T> extends ToXContentToBytes {
        return value;
    }

    public static long parseLong(String s, long minValue, String key) {
        long value = Long.parseLong(s);
        if (value < minValue) {
            throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
        }
        return value;
    }

    public static Setting<Integer> intSetting(String key, int defaultValue, boolean dynamic, Scope scope) {
        return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope);
    }
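A small sketch of the new bounded long parsing (not part of the commit; the key is hypothetical):

    long ok = Setting.parseLong("42", 0, "my.setting");  // 42
    long bad = Setting.parseLong("-1", 0, "my.setting");
    // throws IllegalArgumentException: Failed to parse value [-1] for setting [my.setting] must be >= 0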
@@ -343,6 +375,7 @@ public class Setting<T> extends ToXContentToBytes {
        return array == null ? defaultValue.apply(settings) : arrayToParsableString(array);
    }

    @Override
    public boolean match(String toTest) {
        return pattern.matcher(toTest).matches();
    }

@@ -420,6 +453,7 @@ public class Setting<T> extends ToXContentToBytes {

    @Override
    public void apply(Settings value, Settings current, Settings previous) {
        logger.info("updating [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current));
        consumer.accept(value);
    }

@@ -460,4 +494,16 @@ public class Setting<T> extends ToXContentToBytes {
        }, dynamic, scope);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Setting<?> setting = (Setting<?>) o;
        return Objects.equals(key, setting.key);
    }

    @Override
    public int hashCode() {
        return Objects.hash(key);
    }
}
@@ -58,6 +58,7 @@ import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -76,19 +77,6 @@ public final class Settings implements ToXContent {
    public static final Settings EMPTY = new Builder().build();
    private static final Pattern ARRAY_PATTERN = Pattern.compile("(.*)\\.\\d+$");

    /** Name of the setting to use to disable required units for byte size, time settings. */
    public static final String SETTINGS_REQUIRE_UNITS = "settings_require_units";

    private static boolean settingsRequireUnits = true;

    public static void setSettingsRequireUnits(boolean v) {
        settingsRequireUnits = v;
    }

    public static boolean getSettingsRequireUnits() {
        return settingsRequireUnits;
    }

    private final Map<String, String> forcedUnderscoreSettings;
    private SortedMap<String, String> settings;
@@ -225,6 +213,19 @@ public final class Settings implements ToXContent {
        return builder.build();
    }

    /**
     * Returns a new settings object that contains all settings of the current one filtered by the given settings key predicate.
     */
    public Settings filter(Predicate<String> predicate) {
        Builder builder = new Builder();
        for (Map.Entry<String, String> entry : getAsMap().entrySet()) {
            if (predicate.test(entry.getKey())) {
                builder.put(entry.getKey(), entry.getValue());
            }
        }
        return builder.build();
    }

    /**
     * Returns the settings mapped to the given setting name.
     */
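A usage sketch for the new filter, mirroring how SettingsModule uses it below (not part of the commit):

    Settings indexLevel = settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE);          // index.* keys only
    Settings nodeLevel = settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate()); // everything else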
@@ -34,8 +34,8 @@ public class SettingsModule extends AbstractModule {

    private final Settings settings;
    private final SettingsFilter settingsFilter;
    private final Map<String, Setting<?>> clusterDynamicSettings = new HashMap<>();
    private final Map<String, Setting<?>> clusterSettings = new HashMap<>();
    private final Map<String, Setting<?>> indexSettings = new HashMap<>();

    public SettingsModule(Settings settings, SettingsFilter settingsFilter) {
        this.settings = settings;

@@ -43,26 +43,48 @@ public class SettingsModule extends AbstractModule {
        for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) {
            registerSetting(setting);
        }
        for (Setting<?> setting : IndexScopedSettings.BUILT_IN_INDEX_SETTINGS) {
            registerSetting(setting);
        }
    }

    @Override
    protected void configure() {
        final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values()));
        final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.clusterSettings.values()));
        // by now we are fully configured, lets check node level settings for unregistered index settings
        indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE));
        // we can't call this method yet since not all node level settings have been registered.
        // yet we can validate the ones we have registered to not have invalid values. this is better than nothing
        // and progress over perfection and we fail as soon as possible.
        // clusterSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate()));
        for (Map.Entry<String, String> entry : settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate()).getAsMap().entrySet()) {
            if (clusterSettings.get(entry.getKey()) != null) {
                clusterSettings.validate(entry.getKey(), settings);
            }
        }

        bind(Settings.class).toInstance(settings);
        bind(SettingsFilter.class).toInstance(settingsFilter);
        final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(clusterDynamicSettings.values()));

        bind(ClusterSettings.class).toInstance(clusterSettings);
        bind(IndexScopedSettings.class).toInstance(indexScopedSettings);
    }

    public void registerSetting(Setting<?> setting) {
        switch (setting.getScope()) {
            case CLUSTER:
                if (clusterDynamicSettings.containsKey(setting.getKey())) {
                if (clusterSettings.containsKey(setting.getKey())) {
                    throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
                }
                clusterDynamicSettings.put(setting.getKey(), setting);
                clusterSettings.put(setting.getKey(), setting);
                break;
            case INDEX:
                throw new UnsupportedOperationException("not yet implemented");
                if (indexSettings.containsKey(setting.getKey())) {
                    throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
                }
                indexSettings.put(setting.getKey(), setting);
                break;
        }
    }

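A sketch of the duplicate-registration guard (not part of the commit; the setting key is hypothetical):

    Setting<Integer> myPluginSetting = Setting.intSetting("my_plugin.max_things", 10, true, Setting.Scope.CLUSTER);
    settingsModule.registerSetting(myPluginSetting);
    settingsModule.registerSetting(myPluginSetting);
    // throws IllegalArgumentException: Cannot register setting [my_plugin.max_things] twice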
@@ -20,12 +20,10 @@
package org.elasticsearch.common.unit;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;

import java.io.IOException;
import java.util.Locale;

@@ -176,7 +174,6 @@ public class ByteSizeValue implements Streamable {

    public static ByteSizeValue parseBytesSizeValue(String sValue, ByteSizeValue defaultValue, String settingName) throws ElasticsearchParseException {
        settingName = Objects.requireNonNull(settingName);
        assert settingName.startsWith("index.") == false || MetaDataIndexUpgradeService.INDEX_BYTES_SIZE_SETTINGS.contains(settingName);
        if (sValue == null) {
            return defaultValue;
        }

@@ -213,12 +210,7 @@ public class ByteSizeValue implements Streamable {
                bytes = 0;
            } else {
                // Missing units:
                if (Settings.getSettingsRequireUnits()) {
                    throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}] as a size in bytes: unit is missing or unrecognized", settingName, sValue);
                } else {
                    // Leniency default to bytes:
                    bytes = Long.parseLong(sValue);
                }
                throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}] as a size in bytes: unit is missing or unrecognized", settingName, sValue);
            }
        } catch (NumberFormatException e) {
            throw new ElasticsearchParseException("failed to parse [{}]", e, sValue);
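With the leniency branch gone, unit-less byte sizes now always fail instead of silently parsing as raw bytes; a sketch (not part of the commit; TimeValue#parseTimeValue below changes the same way for unit-less durations):

    ByteSizeValue ok = ByteSizeValue.parseBytesSizeValue("16mb", null, "my.setting");
    ByteSizeValue bad = ByteSizeValue.parseBytesSizeValue("16", null, "my.setting");
    // throws ElasticsearchParseException: failed to parse setting [my.setting] with
    // value [16] as a size in bytes: unit is missing or unrecognized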
@@ -67,6 +67,8 @@ public final class Fuzziness implements ToXContent, Writeable<Fuzziness> {

    /**
     * Creates a {@link Fuzziness} instance from an edit distance. The value must be one of <tt>[0, 1, 2]</tt>
     *
     * Note: Using this method only makes sense if the field you are applying Fuzziness to is some sort of string.
     */
    public static Fuzziness fromEdits(int edits) {
        return new Fuzziness(edits);
@@ -20,12 +20,10 @@
package org.elasticsearch.common.unit;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.joda.time.Period;
import org.joda.time.PeriodType;
import org.joda.time.format.PeriodFormat;

@@ -254,7 +252,6 @@ public class TimeValue implements Streamable {

    public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue, String settingName) {
        settingName = Objects.requireNonNull(settingName);
        assert settingName.startsWith("index.") == false || MetaDataIndexUpgradeService.INDEX_TIME_SETTINGS.contains(settingName) : settingName;
        if (sValue == null) {
            return defaultValue;
        }

@@ -280,13 +277,8 @@ public class TimeValue implements Streamable {
                // Allow this special value to be unit-less:
                millis = 0;
            } else {
                if (Settings.getSettingsRequireUnits()) {
                    // Missing units:
                    throw new ElasticsearchParseException("Failed to parse setting [{}] with value [{}] as a time value: unit is missing or unrecognized", settingName, sValue);
                } else {
                    // Leniency default to msec for bwc:
                    millis = Long.parseLong(sValue);
                }
                // Missing units:
                throw new ElasticsearchParseException("Failed to parse setting [{}] with value [{}] as a time value: unit is missing or unrecognized", settingName, sValue);
            }
            return new TimeValue(millis, TimeUnit.MILLISECONDS);
        } catch (NumberFormatException e) {
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue