Merge pull request #16054 from s1monw/new_index_settings
Cut over all index scope settings to the new setting infrastructure

This change moves all index.* settings over to the new infrastructure. In short, this means:

- every setting that has an index scope must be registered up-front
- index settings are validated on index creation, template creation, and index settings update
- node-level settings starting with index.* are validated on node startup
- settings that are private to ES, like index.version.created, can only be set by tests when they install a specific test plugin
- all index settings can be reset by passing null as their value on update
- all index settings defaults can be listed via the settings APIs

Closes #12854
Closes #6732
Closes #16032
Closes #12790
This commit is contained in:
commit
5a64f08f04
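For orientation before the hunks: the new infrastructure replaces stringly-typed settings plus ad-hoc Validators with typed Setting<T> constants that carry their key, default, bounds, dynamic flag, and scope. A minimal sketch, using only the factory signatures that appear in the hunks below (illustrative, not a class from the commit):

    // Registration is up-front: the constant itself encodes the key, the
    // default (1), the lower bound (0), dynamic=true, and the INDEX scope.
    Setting<Integer> priority = Setting.intSetting("index.priority", 1, 0, true, Setting.Scope.INDEX);

    Settings settings = Settings.settingsBuilder().put("index.priority", 10).build();
    int value = priority.get(settings);          // typed, validated access -> 10
    int fallback = priority.get(Settings.EMPTY); // missing key -> declared default 1
    boolean dynamic = priority.isDynamic();      // true: updateable on a live index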
SettingsUpdater.java

@@ -22,14 +22,9 @@ package org.elasticsearch.action.admin.cluster.settings;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
 
 import static org.elasticsearch.cluster.ClusterState.builder;
 
 /**
@@ -57,11 +52,11 @@ final class SettingsUpdater {
         boolean changed = false;
         Settings.Builder transientSettings = Settings.settingsBuilder();
         transientSettings.put(currentState.metaData().transientSettings());
-        changed |= apply(transientToApply, transientSettings, transientUpdates, "transient");
+        changed |= clusterSettings.updateDynamicSettings(transientToApply, transientSettings, transientUpdates, "transient");
 
         Settings.Builder persistentSettings = Settings.settingsBuilder();
         persistentSettings.put(currentState.metaData().persistentSettings());
-        changed |= apply(persistentToApply, persistentSettings, persistentUpdates, "persistent");
+        changed |= clusterSettings.updateDynamicSettings(persistentToApply, persistentSettings, persistentUpdates, "persistent");
 
         if (!changed) {
             return currentState;
@@ -86,42 +81,5 @@ final class SettingsUpdater {
         return build;
     }
 
-    private boolean apply(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
-        boolean changed = false;
-        final Set<String> toRemove = new HashSet<>();
-        Settings.Builder settingsBuilder = Settings.settingsBuilder();
-        for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
-            if (entry.getValue() == null) {
-                toRemove.add(entry.getKey());
-            } else if (clusterSettings.isLoggerSetting(entry.getKey()) || clusterSettings.hasDynamicSetting(entry.getKey())) {
-                settingsBuilder.put(entry.getKey(), entry.getValue());
-                updates.put(entry.getKey(), entry.getValue());
-                changed = true;
-            } else {
-                throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
-            }
-        }
-        changed |= applyDeletes(toRemove, target);
-        target.put(settingsBuilder.build());
-        return changed;
-    }
-
-    private final boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
-        boolean changed = false;
-        for (String entry : deletes) {
-            Set<String> keysToRemove = new HashSet<>();
-            Set<String> keySet = builder.internalMap().keySet();
-            for (String key : keySet) {
-                if (Regex.simpleMatch(entry, key)) {
-                    keysToRemove.add(key);
-                }
-            }
-            for (String key : keysToRemove) {
-                builder.remove(key);
-                changed = true;
-            }
-        }
-        return changed;
-    }
 }
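With apply()/applyDeletes() gone, both transient and persistent updates flow through ClusterSettings.updateDynamicSettings, so rejection of unknown or non-dynamic keys happens in one place. A hedged usage sketch against the Java client of this era (the builder methods are assumed from the 2.x client API, not shown in this diff):

    // Unknown or non-dynamic keys are now rejected centrally with an
    // IllegalArgumentException instead of by the hand-rolled loop deleted above.
    ClusterUpdateSettingsResponse response = client.admin().cluster()
            .prepareUpdateSettings()
            .setTransientSettings(Settings.settingsBuilder()
                    .put("cluster.routing.allocation.enable", "none"))
            .get();
    // Per the commit message, sending null as a value on update resets the
    // setting to its default and removes it from the cluster state.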
TransportUpdateSettingsAction.java

@@ -62,7 +62,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
         if (globalBlock != null) {
             return globalBlock;
         }
-        if (request.settings().getAsMap().size() == 1 && (request.settings().get(IndexMetaData.SETTING_BLOCKS_METADATA) != null || request.settings().get(IndexMetaData.SETTING_READ_ONLY) != null )) {
+        if (request.settings().getAsMap().size() == 1 && IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings()) || IndexMetaData.INDEX_READ_ONLY_SETTING.exists(request.settings())) {
             return null;
         }
         return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
TransportPutIndexTemplateAction.java

@@ -25,9 +25,11 @@ import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -38,13 +40,15 @@ import org.elasticsearch.transport.TransportService;
 public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<PutIndexTemplateRequest, PutIndexTemplateResponse> {
 
     private final MetaDataIndexTemplateService indexTemplateService;
+    private final IndexScopedSettings indexScopedSettings;
 
     @Inject
     public TransportPutIndexTemplateAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                            ThreadPool threadPool, MetaDataIndexTemplateService indexTemplateService,
-                                           ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+                                           ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexScopedSettings indexScopedSettings) {
         super(settings, PutIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutIndexTemplateRequest::new);
         this.indexTemplateService = indexTemplateService;
+        this.indexScopedSettings = indexScopedSettings;
     }
 
     @Override
@@ -69,11 +73,13 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
         if (cause.length() == 0) {
             cause = "api";
         }
 
+        final Settings.Builder templateSettingsBuilder = Settings.settingsBuilder();
+        templateSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
+        indexScopedSettings.validate(templateSettingsBuilder);
         indexTemplateService.putTemplate(new MetaDataIndexTemplateService.PutRequest(cause, request.name())
                 .template(request.template())
                 .order(request.order())
-                .settings(request.settings())
+                .settings(templateSettingsBuilder.build())
                 .mappings(request.mappings())
                 .aliases(request.aliases())
                 .customs(request.customs())
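Template settings now go through the same normalization and validation as index creation, so a bad key fails at template creation time rather than at the first index created from the template. A small sketch of the normalization step added above:

    // Keys without the "index." prefix are normalized before validation, so a
    // template that says "number_of_shards" is validated as "index.number_of_shards".
    Settings.Builder builder = Settings.settingsBuilder()
            .put("number_of_shards", 3)
            .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
    // builder now contains index.number_of_shards=3; indexScopedSettings.validate(builder)
    // throws IllegalArgumentException for keys that were never registered.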
AutoCreateIndex.java

@@ -44,7 +44,7 @@ public final class AutoCreateIndex {
     @Inject
     public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) {
         this.resolver = resolver;
-        dynamicMappingDisabled = !settings.getAsBoolean(MapperService.INDEX_MAPPER_DYNAMIC_SETTING, MapperService.INDEX_MAPPER_DYNAMIC_DEFAULT);
+        dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings);
         String value = settings.get("action.auto_create_index");
         if (value == null || Booleans.isExplicitTrue(value)) {
             needToCheck = true;
ClusterModule.java

@@ -23,7 +23,6 @@ import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
 import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
 import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.IndexTemplateFilter;
 import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
@@ -36,7 +35,6 @@ import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
 import org.elasticsearch.cluster.node.DiscoveryNodeService;
 import org.elasticsearch.cluster.routing.OperationRouting;
 import org.elasticsearch.cluster.routing.RoutingService;
-import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
 import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
@@ -56,27 +54,12 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio
 import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
 import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
 import org.elasticsearch.cluster.service.InternalClusterService;
-import org.elasticsearch.cluster.settings.DynamicSettings;
-import org.elasticsearch.cluster.settings.Validator;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.ExtensionPoint;
 import org.elasticsearch.gateway.GatewayAllocator;
-import org.elasticsearch.gateway.PrimaryShardAllocator;
-import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.IndexingSlowLog;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.search.stats.SearchSlowLog;
-import org.elasticsearch.index.settings.IndexDynamicSettings;
-import org.elasticsearch.index.MergePolicyConfig;
-import org.elasticsearch.index.MergeSchedulerConfig;
-import org.elasticsearch.index.store.IndexStore;
-import org.elasticsearch.indices.IndicesWarmer;
-import org.elasticsearch.indices.cache.request.IndicesRequestCache;
-import org.elasticsearch.indices.ttl.IndicesTTLService;
-import org.elasticsearch.search.internal.DefaultSearchContext;
 
 import java.util.Arrays;
 import java.util.Collections;
@@ -107,7 +90,6 @@ public class ClusterModule extends AbstractModule {
             SnapshotInProgressAllocationDecider.class));
 
     private final Settings settings;
-    private final DynamicSettings.Builder indexDynamicSettings = new DynamicSettings.Builder();
     private final ExtensionPoint.SelectedType<ShardsAllocator> shardsAllocators = new ExtensionPoint.SelectedType<>("shards_allocator", ShardsAllocator.class);
     private final ExtensionPoint.ClassSet<AllocationDecider> allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class);
    private final ExtensionPoint.ClassSet<IndexTemplateFilter> indexTemplateFilters = new ExtensionPoint.ClassSet<>("index_template_filter", IndexTemplateFilter.class);
@@ -117,9 +99,6 @@ public class ClusterModule extends AbstractModule {
 
     public ClusterModule(Settings settings) {
         this.settings = settings;
-
-        registerBuiltinIndexSettings();
-
         for (Class<? extends AllocationDecider> decider : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
             registerAllocationDecider(decider);
         }
@@ -127,69 +106,6 @@ public class ClusterModule extends AbstractModule {
         registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class);
     }
 
-    private void registerBuiltinIndexSettings() {
-        registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
-        registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE, Validator.EMPTY);
-        registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.NON_NEGATIVE_INTEGER);
-        registerIndexDynamicSetting(MergeSchedulerConfig.MAX_MERGE_COUNT, Validator.EMPTY);
-        registerIndexDynamicSetting(MergeSchedulerConfig.AUTO_THROTTLE, Validator.EMPTY);
-        registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY);
-        registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "*", Validator.EMPTY);
-        registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "*", Validator.EMPTY);
-        registerIndexDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY);
-        registerIndexDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, Validator.NON_NEGATIVE_INTEGER);
-        registerIndexDynamicSetting(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexMetaData.SETTING_READ_ONLY, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexMetaData.SETTING_BLOCKS_READ, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexMetaData.SETTING_BLOCKS_WRITE, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexMetaData.SETTING_BLOCKS_METADATA, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexMetaData.SETTING_PRIORITY, Validator.NON_NEGATIVE_INTEGER);
-        registerIndexDynamicSetting(IndicesTTLService.INDEX_TTL_DISABLE_PURGE, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexSettings.INDEX_REFRESH_INTERVAL, Validator.TIME);
-        registerIndexDynamicSetting(PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexSettings.INDEX_GC_DELETES_SETTING, Validator.TIME);
-        registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, Validator.TIME);
-        registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, Validator.TIME);
-        registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, Validator.TIME);
-        registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, Validator.TIME);
-        registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG, Validator.EMPTY);
-        registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN, Validator.TIME);
-        registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO, Validator.TIME);
-        registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG, Validator.TIME);
-        registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE, Validator.TIME);
-        registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN, Validator.TIME);
-        registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO, Validator.TIME);
-        registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG, Validator.TIME);
-        registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE, Validator.TIME);
-        registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_REFORMAT, Validator.EMPTY);
-        registerIndexDynamicSetting(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL, Validator.EMPTY);
-        registerIndexDynamicSetting(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, Validator.INTEGER);
-        registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, Validator.DOUBLE);
-        registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT, Validator.BYTES_SIZE);
-        registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, Validator.INTEGER_GTE_2);
-        registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT, Validator.INTEGER_GTE_2);
-        registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, Validator.BYTES_SIZE);
-        registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, Validator.DOUBLE_GTE_2);
-        registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, Validator.NON_NEGATIVE_DOUBLE);
-        registerIndexDynamicSetting(MergePolicyConfig.INDEX_COMPOUND_FORMAT, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE);
-        registerIndexDynamicSetting(IndexSettings.INDEX_TRANSLOG_DURABILITY, Validator.EMPTY);
-        registerIndexDynamicSetting(IndicesWarmer.INDEX_WARMER_ENABLED, Validator.EMPTY);
-        registerIndexDynamicSetting(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, Validator.BOOLEAN);
-        registerIndexDynamicSetting(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, Validator.TIME);
-        registerIndexDynamicSetting(DefaultSearchContext.MAX_RESULT_WINDOW, Validator.POSITIVE_INTEGER);
-        registerIndexDynamicSetting(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING, Validator.NON_NEGATIVE_INTEGER);
-    }
-
-    public void registerIndexDynamicSetting(String setting, Validator validator) {
-        indexDynamicSettings.addSetting(setting, validator);
-    }
-
     public void registerAllocationDecider(Class<? extends AllocationDecider> allocationDecider) {
         allocationDeciders.registerExtension(allocationDecider);
     }
@@ -204,8 +120,6 @@ public class ClusterModule extends AbstractModule {
 
     @Override
     protected void configure() {
-        bind(DynamicSettings.class).annotatedWith(IndexDynamicSettings.class).toInstance(indexDynamicSettings.build());
-
         // bind ShardsAllocator
         String shardsAllocatorType = shardsAllocators.bindType(binder(), settings, ClusterModule.SHARDS_ALLOCATOR_TYPE_KEY, ClusterModule.BALANCED_ALLOCATOR);
         if (shardsAllocatorType.equals(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR)) {
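The entire Validator-based registry deleted above disappears because each constraint now lives on the Setting constant itself, registered up-front. Compare the old and new declarations of the replica-count setting (the new signature is taken verbatim from the IndexMetaData hunk later in this commit):

    // Old: key and validator are associated at module wiring time, as strings.
    registerIndexDynamicSetting(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, Validator.NON_NEGATIVE_INTEGER);

    // New: default 1, minimum 0, dynamic, index-scoped - all in one declaration.
    Setting<Integer> replicas = Setting.intSetting("index.number_of_replicas", 1, 0, true, Setting.Scope.INDEX);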
ClusterBlocks.java

@@ -306,16 +306,16 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
         if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
             addIndexBlock(indexMetaData.getIndex(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
         }
-        if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_READ_ONLY, false)) {
+        if (IndexMetaData.INDEX_READ_ONLY_SETTING.get(indexMetaData.getSettings())) {
             addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
         }
-        if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, false)) {
+        if (IndexMetaData.INDEX_BLOCKS_READ_SETTING.get(indexMetaData.getSettings())) {
             addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_READ_BLOCK);
         }
-        if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, false)) {
+        if (IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.get(indexMetaData.getSettings())) {
             addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK);
         }
-        if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, false)) {
+        if (IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.get(indexMetaData.getSettings())) {
             addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK);
         }
         return this;
AutoExpandReplicas.java (new file)

@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.settings.Setting;
+
+/**
+ * This class acts as a functional wrapper around the <tt>index.auto_expand_replicas</tt> setting.
+ * This setting or rather it's value is expanded into a min and max value which requires special handling
+ * based on the number of datanodes in the cluster. This class handles all the parsing and streamlines the access to these values.
+ */
+final class AutoExpandReplicas {
+    // the value we recognize in the "max" position to mean all the nodes
+    private static final String ALL_NODES_VALUE = "all";
+    public static final Setting<AutoExpandReplicas> SETTING = new Setting<>(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "false", (value) -> {
+        final int min;
+        final int max;
+        if (Booleans.parseBoolean(value, true) == false) {
+            return new AutoExpandReplicas(0, 0, false);
+        }
+        final int dash = value.indexOf('-');
+        if (-1 == dash) {
+            throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash);
+        }
+        final String sMin = value.substring(0, dash);
+        try {
+            min = Integer.parseInt(sMin);
+        } catch (NumberFormatException e) {
+            throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash, e);
+        }
+        String sMax = value.substring(dash + 1);
+        if (sMax.equals(ALL_NODES_VALUE)) {
+            max = Integer.MAX_VALUE;
+        } else {
+            try {
+                max = Integer.parseInt(sMax);
+            } catch (NumberFormatException e) {
+                throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash, e);
+            }
+        }
+        return new AutoExpandReplicas(min, max, true);
+    }, true, Setting.Scope.INDEX);
+
+    private final int minReplicas;
+    private final int maxReplicas;
+    private final boolean enabled;
+
+    private AutoExpandReplicas(int minReplicas, int maxReplicas, boolean enabled) {
+        if (minReplicas > maxReplicas) {
+            throw new IllegalArgumentException("[" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] minReplicas must be =< maxReplicas but wasn't " + minReplicas + " > " + maxReplicas);
+        }
+        this.minReplicas = minReplicas;
+        this.maxReplicas = maxReplicas;
+        this.enabled = enabled;
+    }
+
+    int getMinReplicas() {
+        return minReplicas;
+    }
+
+    int getMaxReplicas(int numDataNodes) {
+        return Math.min(maxReplicas, numDataNodes - 1);
+    }
+
+    @Override
+    public String toString() {
+        return enabled ? minReplicas + "-" + maxReplicas : "false";
+    }
+
+    boolean isEnabled() {
+        return enabled;
+    }
+}
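A worked example of the parser above (the class is package-private, so it is callable like this only from the same package or its tests): "0-all" enables expansion with min 0 and an unbounded max, which getMaxReplicas then caps at numDataNodes - 1.

    AutoExpandReplicas auto = AutoExpandReplicas.SETTING.get(
            Settings.settingsBuilder().put("index.auto_expand_replicas", "0-all").build());
    auto.isEnabled();       // true
    auto.getMinReplicas();  // 0
    auto.getMaxReplicas(5); // Math.min(Integer.MAX_VALUE, 5 - 1) = 4
    AutoExpandReplicas.SETTING.get(Settings.EMPTY).isEnabled(); // default "false" -> disabled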
IndexMetaData.java

@@ -29,14 +29,17 @@ import org.elasticsearch.cluster.DiffableUtils;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
 import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.loader.SettingsLoader;
 import org.elasticsearch.common.xcontent.FromXContentBuilder;
@@ -58,6 +61,7 @@ import java.util.HashSet;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
+import java.util.function.Function;
 
 import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
 import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
@@ -70,10 +74,6 @@ import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
  */
 public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuilder<IndexMetaData>, ToXContent {
 
-    public static final IndexMetaData PROTO = IndexMetaData.builder("")
-        .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
-        .numberOfShards(1).numberOfReplicas(0).build();
-
     public interface Custom extends Diffable<Custom>, ToXContent {
 
         String type();
@@ -152,14 +152,29 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
     }
     public static final String INDEX_SETTING_PREFIX = "index.";
     public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards";
+    public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, false, Setting.Scope.INDEX);
     public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas";
+    public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, true, Setting.Scope.INDEX);
     public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas";
+    public static final Setting<Boolean> INDEX_SHADOW_REPLICAS_SETTING = Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, false, Setting.Scope.INDEX);
+
     public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem";
+    public static final Setting<Boolean> INDEX_SHARED_FILESYSTEM_SETTING = Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, false, Setting.Scope.INDEX);
+
     public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas";
+    public static final Setting<AutoExpandReplicas> INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING;
     public static final String SETTING_READ_ONLY = "index.blocks.read_only";
+    public static final Setting<Boolean> INDEX_READ_ONLY_SETTING = Setting.boolSetting(SETTING_READ_ONLY, false, true, Setting.Scope.INDEX);
+
     public static final String SETTING_BLOCKS_READ = "index.blocks.read";
+    public static final Setting<Boolean> INDEX_BLOCKS_READ_SETTING = Setting.boolSetting(SETTING_BLOCKS_READ, false, true, Setting.Scope.INDEX);
+
     public static final String SETTING_BLOCKS_WRITE = "index.blocks.write";
+    public static final Setting<Boolean> INDEX_BLOCKS_WRITE_SETTING = Setting.boolSetting(SETTING_BLOCKS_WRITE, false, true, Setting.Scope.INDEX);
+
     public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata";
+    public static final Setting<Boolean> INDEX_BLOCKS_METADATA_SETTING = Setting.boolSetting(SETTING_BLOCKS_METADATA, false, true, Setting.Scope.INDEX);
+
     public static final String SETTING_VERSION_CREATED = "index.version.created";
     public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string";
     public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded";
@@ -167,12 +182,23 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
     public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible";
     public static final String SETTING_CREATION_DATE = "index.creation_date";
     public static final String SETTING_PRIORITY = "index.priority";
+    public static final Setting<Integer> INDEX_PRIORITY_SETTING = Setting.intSetting("index.priority", 1, 0, true, Setting.Scope.INDEX);
     public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string";
     public static final String SETTING_INDEX_UUID = "index.uuid";
     public static final String SETTING_DATA_PATH = "index.data_path";
+    public static final Setting<String> INDEX_DATA_PATH_SETTING = new Setting<>(SETTING_DATA_PATH, "", Function.identity(), false, Setting.Scope.INDEX);
     public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
+    public static final Setting<Boolean> INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, true, Setting.Scope.INDEX);
     public static final String INDEX_UUID_NA_VALUE = "_na_";
 
+    public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.require.", true, Setting.Scope.INDEX);
+    public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.include.", true, Setting.Scope.INDEX);
+    public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.exclude.", true, Setting.Scope.INDEX);
+
+    public static final IndexMetaData PROTO = IndexMetaData.builder("")
+        .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
+        .numberOfShards(1).numberOfReplicas(0).build();
+
     public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
 
     private final int numberOfShards;
@@ -627,10 +653,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             return this;
         }
 
-        public long creationDate() {
-            return settings.getAsLong(SETTING_CREATION_DATE, -1l);
-        }
-
         public Builder settings(Settings.Builder settings) {
             this.settings = settings.build();
             return this;
@@ -645,11 +667,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             return mappings.get(type);
         }
 
-        public Builder removeMapping(String mappingType) {
-            mappings.remove(mappingType);
-            return this;
-        }
-
         public Builder putMapping(String type, String source) throws IOException {
             try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
                 putMapping(new MappingMetaData(type, parser.mapOrdered()));
@@ -692,24 +709,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             return this;
         }
 
-        public Builder removeCustom(String type) {
-            this.customs.remove(type);
-            return this;
-        }
-
-        public Custom getCustom(String type) {
-            return this.customs.get(type);
-        }
-
         public Builder putActiveAllocationIds(int shardId, Set<String> allocationIds) {
             activeAllocationIds.put(shardId, new HashSet(allocationIds));
             return this;
         }
 
-        public Set<String> getActiveAllocationIds(int shardId) {
-            return activeAllocationIds.get(shardId);
-        }
-
         public long version() {
             return this.version;
         }
@@ -758,22 +762,21 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                 filledActiveAllocationIds.put(i, Collections.emptySet());
             }
         }
-        Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
+        final Map<String, String> requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(settings).getAsMap();
         final DiscoveryNodeFilters requireFilters;
         if (requireMap.isEmpty()) {
             requireFilters = null;
         } else {
             requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
         }
-        Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
+        Map<String, String> includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.get(settings).getAsMap();
         final DiscoveryNodeFilters includeFilters;
         if (includeMap.isEmpty()) {
             includeFilters = null;
         } else {
             includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
         }
-        Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
+        Map<String, String> excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.get(settings).getAsMap();
         final DiscoveryNodeFilters excludeFilters;
         if (excludeMap.isEmpty()) {
             excludeFilters = null;
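The group settings introduced above gather every key under a prefix into one Settings object, which is how the builder now reads the allocation filters. A sketch; the returned keys are relative to the group prefix, matching the getByPrefix behavior the new calls replace:

    Settings indexSettings = Settings.settingsBuilder()
            .put("index.routing.allocation.require.rack", "r1")
            .put("index.routing.allocation.require.zone", "z2")
            .build();
    Map<String, String> require =
            IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(indexSettings).getAsMap();
    // require => {rack=r1, zone=z2}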
MetaDataCreateIndexService.java

@@ -47,6 +47,7 @@ import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -103,13 +104,14 @@ public class MetaDataCreateIndexService extends AbstractComponent {
     private final IndexTemplateFilter indexTemplateFilter;
     private final Environment env;
     private final NodeServicesProvider nodeServicesProvider;
+    private final IndexScopedSettings indexScopedSettings;
 
 
     @Inject
     public MetaDataCreateIndexService(Settings settings, ClusterService clusterService,
                                       IndicesService indicesService, AllocationService allocationService,
                                       Version version, AliasValidator aliasValidator,
-                                      Set<IndexTemplateFilter> indexTemplateFilters, Environment env, NodeServicesProvider nodeServicesProvider) {
+                                      Set<IndexTemplateFilter> indexTemplateFilters, Environment env, NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings) {
         super(settings);
         this.clusterService = clusterService;
         this.indicesService = indicesService;
@@ -118,6 +120,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
         this.aliasValidator = aliasValidator;
         this.env = env;
         this.nodeServicesProvider = nodeServicesProvider;
+        this.indexScopedSettings = indexScopedSettings;
 
         if (indexTemplateFilters.isEmpty()) {
             this.indexTemplateFilter = DEFAULT_INDEX_TEMPLATE_FILTER;
@@ -174,6 +177,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
     public void createIndex(final CreateIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
         Settings.Builder updatedSettingsBuilder = Settings.settingsBuilder();
         updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
+        indexScopedSettings.validate(updatedSettingsBuilder);
         request.settings(updatedSettingsBuilder.build());
 
         clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]",
@@ -460,16 +464,17 @@ public class MetaDataCreateIndexService extends AbstractComponent {
     }
 
     List<String> getIndexSettingsValidationErrors(Settings settings) {
-        String customPath = settings.get(IndexMetaData.SETTING_DATA_PATH, null);
+        String customPath = IndexMetaData.INDEX_DATA_PATH_SETTING.get(settings);
         List<String> validationErrors = new ArrayList<>();
-        if (customPath != null && env.sharedDataFile() == null) {
+        if (Strings.isEmpty(customPath) == false && env.sharedDataFile() == null) {
             validationErrors.add("path.shared_data must be set in order to use custom data paths");
-        } else if (customPath != null) {
+        } else if (Strings.isEmpty(customPath) == false) {
             Path resolvedPath = PathUtils.get(new Path[]{env.sharedDataFile()}, customPath);
             if (resolvedPath == null) {
                 validationErrors.add("custom path [" + customPath + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]");
             }
         }
+        //norelease - this can be removed?
         Integer number_of_primaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
         Integer number_of_replicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null);
         if (number_of_primaries != null && number_of_primaries <= 0) {
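The switch from null checks to Strings.isEmpty in the last hunk follows directly from the new typed setting: INDEX_DATA_PATH_SETTING declares a default of "", so get() returns an empty string rather than null when the key is absent.

    String customPath = IndexMetaData.INDEX_DATA_PATH_SETTING.get(Settings.EMPTY); // "" - never null
    boolean hasCustomPath = Strings.isEmpty(customPath) == false;                  // false for the default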
MetaDataIndexUpgradeService.java

@@ -21,7 +21,6 @@ package org.elasticsearch.cluster.metadata;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.lucene.analysis.Analyzer;
 import org.elasticsearch.Version;
-import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
@@ -70,7 +69,6 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
         }
         checkSupportedVersion(indexMetaData);
         IndexMetaData newMetaData = indexMetaData;
-        newMetaData = addDefaultUnitsIfNeeded(newMetaData);
         checkMappingsCompatibility(newMetaData);
         newMetaData = markAsUpgraded(newMetaData);
         return newMetaData;
@@ -113,103 +111,6 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
         return false;
     }
 
-    /** All known byte-sized settings for an index. */
-    public static final Set<String> INDEX_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
-        "index.merge.policy.floor_segment",
-        "index.merge.policy.max_merged_segment",
-        "index.merge.policy.max_merge_size",
-        "index.merge.policy.min_merge_size",
-        "index.shard.recovery.file_chunk_size",
-        "index.shard.recovery.translog_size",
-        "index.store.throttle.max_bytes_per_sec",
-        "index.translog.flush_threshold_size",
-        "index.translog.fs.buffer_size",
-        "index.version_map_size"));
-
-    /** All known time settings for an index. */
-    public static final Set<String> INDEX_TIME_SETTINGS = unmodifiableSet(newHashSet(
-        "index.gateway.wait_for_mapping_update_post_recovery",
-        "index.shard.wait_for_mapping_update_post_recovery",
-        "index.gc_deletes",
-        "index.indexing.slowlog.threshold.index.debug",
-        "index.indexing.slowlog.threshold.index.info",
-        "index.indexing.slowlog.threshold.index.trace",
-        "index.indexing.slowlog.threshold.index.warn",
-        "index.refresh_interval",
-        "index.search.slowlog.threshold.fetch.debug",
-        "index.search.slowlog.threshold.fetch.info",
-        "index.search.slowlog.threshold.fetch.trace",
-        "index.search.slowlog.threshold.fetch.warn",
-        "index.search.slowlog.threshold.query.debug",
-        "index.search.slowlog.threshold.query.info",
-        "index.search.slowlog.threshold.query.trace",
-        "index.search.slowlog.threshold.query.warn",
-        "index.shadow.wait_for_initial_commit",
-        "index.store.stats_refresh_interval",
-        "index.translog.flush_threshold_period",
-        "index.translog.interval",
-        "index.translog.sync_interval",
-        "index.shard.inactive_time",
-        UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING));
-
-    /**
-     * Elasticsearch 2.0 requires units on byte/memory and time settings; this method adds the default unit to any such settings that are
-     * missing units.
-     */
-    private IndexMetaData addDefaultUnitsIfNeeded(IndexMetaData indexMetaData) {
-        if (indexMetaData.getCreationVersion().before(Version.V_2_0_0_beta1)) {
-            // TODO: can we somehow only do this *once* for a pre-2.0 index? Maybe we could stuff a "fake marker setting" here? Seems hackish...
-            // Created lazily if we find any settings that are missing units:
-            Settings settings = indexMetaData.getSettings();
-            Settings.Builder newSettings = null;
-            for(String byteSizeSetting : INDEX_BYTES_SIZE_SETTINGS) {
-                String value = settings.get(byteSizeSetting);
-                if (value != null) {
-                    try {
-                        Long.parseLong(value);
-                    } catch (NumberFormatException nfe) {
-                        continue;
-                    }
-                    // It's a naked number that previously would be interpreted as default unit (bytes); now we add it:
-                    logger.warn("byte-sized index setting [{}] with value [{}] is missing units; assuming default units (b) but in future versions this will be a hard error", byteSizeSetting, value);
-                    if (newSettings == null) {
-                        newSettings = Settings.builder();
-                        newSettings.put(settings);
-                    }
-                    newSettings.put(byteSizeSetting, value + "b");
-                }
-            }
-            for(String timeSetting : INDEX_TIME_SETTINGS) {
-                String value = settings.get(timeSetting);
-                if (value != null) {
-                    try {
-                        Long.parseLong(value);
-                    } catch (NumberFormatException nfe) {
-                        continue;
-                    }
-                    // It's a naked number that previously would be interpreted as default unit (ms); now we add it:
-                    logger.warn("time index setting [{}] with value [{}] is missing units; assuming default units (ms) but in future versions this will be a hard error", timeSetting, value);
-                    if (newSettings == null) {
-                        newSettings = Settings.builder();
-                        newSettings.put(settings);
-                    }
-                    newSettings.put(timeSetting, value + "ms");
-                }
-            }
-            if (newSettings != null) {
-                // At least one setting was changed:
-                return IndexMetaData.builder(indexMetaData)
-                    .version(indexMetaData.getVersion())
-                    .settings(newSettings.build())
-                    .build();
-            }
-        }
-
-        // No changes:
-        return indexMetaData;
-    }
-
-
     /**
      * Checks the mappings for compatibility with the current version
      */
@@ -217,7 +118,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
         try {
             // We cannot instantiate real analysis server at this point because the node might not have
             // been started yet. However, we don't really need real analyzers at this stage - so we can fake it
-            IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.emptyList());
+            IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings);
             SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
 
             try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
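The deleted back-fill above existed because pre-2.0 settings were raw strings and a bare "1024" carried an implicit unit. With typed settings the unit is part of the parser, so unit-less or malformed values fail at validation time on every path (registration, creation, update). A hedged sketch; Setting.byteSizeSetting is assumed to exist alongside the intSetting/boolSetting factories shown in this diff:

    // Assumed factory: parses "2mb", "512kb", or a bare byte count once, up front.
    Setting<ByteSizeValue> floorSegment = Setting.byteSizeSetting(
            "index.merge.policy.floor_segment", new ByteSizeValue(2, ByteSizeUnit.MB), true, Setting.Scope.INDEX);
    ByteSizeValue parsed = floorSegment.get(
            Settings.settingsBuilder().put("index.merge.policy.floor_segment", "4mb").build());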
MetaDataUpdateSettingsService.java

@@ -30,19 +30,20 @@ import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateListener;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.settings.DynamicSettings;
-import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.IndexScopedSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.index.settings.IndexDynamicSettings;
+import org.elasticsearch.index.IndexNotFoundException;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -59,25 +60,21 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
  */
 public class MetaDataUpdateSettingsService extends AbstractComponent implements ClusterStateListener {
 
-    // the value we recognize in the "max" position to mean all the nodes
-    private static final String ALL_NODES_VALUE = "all";
-
     private final ClusterService clusterService;
 
     private final AllocationService allocationService;
 
-    private final DynamicSettings dynamicSettings;
-
     private final IndexNameExpressionResolver indexNameExpressionResolver;
+    private final IndexScopedSettings indexScopedSettings;
 
     @Inject
-    public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, @IndexDynamicSettings DynamicSettings dynamicSettings, IndexNameExpressionResolver indexNameExpressionResolver) {
+    public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, IndexScopedSettings indexScopedSettings, IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings);
         this.clusterService = clusterService;
         this.indexNameExpressionResolver = indexNameExpressionResolver;
         this.clusterService.add(this);
         this.allocationService = allocationService;
-        this.dynamicSettings = dynamicSettings;
+        this.indexScopedSettings = indexScopedSettings;
     }
 
     @Override
@@ -90,49 +87,25 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
         final int dataNodeCount = event.state().nodes().dataNodes().size();
 
         Map<Integer, List<String>> nrReplicasChanged = new HashMap<>();
 
         // we need to do this each time in case it was changed by update settings
         for (final IndexMetaData indexMetaData : event.state().metaData()) {
-            String autoExpandReplicas = indexMetaData.getSettings().get(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS);
-            if (autoExpandReplicas != null && Booleans.parseBoolean(autoExpandReplicas, true)) { // Booleans only work for false values, just as we want it here
-                try {
-                    final int min;
-                    final int max;
-
-                    final int dash = autoExpandReplicas.indexOf('-');
-                    if (-1 == dash) {
-                        logger.warn("failed to set [{}] for index [{}], it should be dash delimited [{}]",
-                            IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), autoExpandReplicas);
-                        continue;
-                    }
-                    final String sMin = autoExpandReplicas.substring(0, dash);
-                    try {
-                        min = Integer.parseInt(sMin);
-                    } catch (NumberFormatException e) {
-                        logger.warn("failed to set [{}] for index [{}], minimum value is not a number [{}]",
-                            e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), sMin);
-                        continue;
-                    }
-                    String sMax = autoExpandReplicas.substring(dash + 1);
-                    if (sMax.equals(ALL_NODES_VALUE)) {
-                        max = dataNodeCount - 1;
-                    } else {
-                        try {
-                            max = Integer.parseInt(sMax);
-                        } catch (NumberFormatException e) {
-                            logger.warn("failed to set [{}] for index [{}], maximum value is neither [{}] nor a number [{}]",
-                                e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), ALL_NODES_VALUE, sMax);
-                            continue;
-                        }
-                    }
-
+            AutoExpandReplicas autoExpandReplicas = IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(indexMetaData.getSettings());
+            if (autoExpandReplicas.isEnabled()) {
+                /*
+                 * we have to expand the number of replicas for this index to at least min and at most max nodes here
+                 * so we are bumping it up if we have to or reduce it depending on min/max and the number of datanodes.
+                 * If we change the number of replicas we just let the shard allocator do it's thing once we updated it
+                 * since it goes through the index metadata to figure out if something needs to be done anyway. Do do that
+                 * we issue a cluster settings update command below and kicks off a reroute.
+                 */
+                final int min = autoExpandReplicas.getMinReplicas();
+                final int max = autoExpandReplicas.getMaxReplicas(dataNodeCount);
                 int numberOfReplicas = dataNodeCount - 1;
                 if (numberOfReplicas < min) {
                     numberOfReplicas = min;
                 } else if (numberOfReplicas > max) {
                     numberOfReplicas = max;
                 }
 
                 // same value, nothing to do there
                 if (numberOfReplicas == indexMetaData.getNumberOfReplicas()) {
                     continue;
@@ -141,18 +114,16 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
                 if (numberOfReplicas >= min && numberOfReplicas <= max) {
 
                     if (!nrReplicasChanged.containsKey(numberOfReplicas)) {
-                        nrReplicasChanged.put(numberOfReplicas, new ArrayList<String>());
+                        nrReplicasChanged.put(numberOfReplicas, new ArrayList<>());
                     }
 
                     nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex());
                 }
-            } catch (Exception e) {
-                logger.warn("[{}] failed to parse auto expand replicas", e, indexMetaData.getIndex());
-            }
             }
         }
 
         if (nrReplicasChanged.size() > 0) {
             // update settings and kick of a reroute (implicit) for them to take effect
             for (final Integer fNumberOfReplicas : nrReplicasChanged.keySet()) {
                 Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, fNumberOfReplicas).build();
                 final List<String> indices = nrReplicasChanged.get(fNumberOfReplicas);
@@ -182,42 +153,30 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
     }
 
     public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
-        Settings.Builder updatedSettingsBuilder = Settings.settingsBuilder();
-        updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
+        final Settings normalizedSettings = Settings.settingsBuilder().put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
+        Settings.Builder settingsForClosedIndices = Settings.builder();
+        Settings.Builder settingsForOpenIndices = Settings.builder();
+        Settings.Builder skipppedSettings = Settings.builder();
 
+        indexScopedSettings.validate(normalizedSettings);
         // never allow to change the number of shards
-        for (String key : updatedSettingsBuilder.internalMap().keySet()) {
-            if (key.equals(IndexMetaData.SETTING_NUMBER_OF_SHARDS)) {
+        for (Map.Entry<String, String> entry : normalizedSettings.getAsMap().entrySet()) {
+            if (entry.getKey().equals(IndexMetaData.SETTING_NUMBER_OF_SHARDS)) {
                 listener.onFailure(new IllegalArgumentException("can't change the number of shards for an index"));
                 return;
             }
-        }
 
-        final Settings closeSettings = updatedSettingsBuilder.build();
-
-        final Set<String> removedSettings = new HashSet<>();
-        final Set<String> errors = new HashSet<>();
-        for (Map.Entry<String, String> setting : updatedSettingsBuilder.internalMap().entrySet()) {
-            if (!dynamicSettings.hasDynamicSetting(setting.getKey())) {
-                removedSettings.add(setting.getKey());
+            Setting setting = indexScopedSettings.get(entry.getKey());
+            assert setting != null; // we already validated the normalized settings
+            settingsForClosedIndices.put(entry.getKey(), entry.getValue());
+            if (setting.isDynamic()) {
+                settingsForOpenIndices.put(entry.getKey(), entry.getValue());
             } else {
-                String error = dynamicSettings.validateDynamicSetting(setting.getKey(), setting.getValue(), clusterService.state());
-                if (error != null) {
-                    errors.add("[" + setting.getKey() + "] - " + error);
-                }
+                skipppedSettings.put(entry.getKey(), entry.getValue());
             }
         }
 
-        if (!errors.isEmpty()) {
-            listener.onFailure(new IllegalArgumentException("can't process the settings: " + errors.toString()));
-            return;
-        }
-
-        if (!removedSettings.isEmpty()) {
-            for (String removedSetting : removedSettings) {
-                updatedSettingsBuilder.remove(removedSetting);
-            }
-        }
-        final Settings openSettings = updatedSettingsBuilder.build();
+        final Settings skippedSettigns = skipppedSettings.build();
+        final Settings closedSettings = settingsForClosedIndices.build();
+        final Settings openSettings = settingsForOpenIndices.build();
 
         clusterService.submitStateUpdateTask("update-settings",
                 new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@@ -245,16 +204,16 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
                     }
                 }
 
-                if (closeIndices.size() > 0 && closeSettings.get(IndexMetaData.SETTING_NUMBER_OF_REPLICAS) != null) {
+                if (closeIndices.size() > 0 && closedSettings.get(IndexMetaData.SETTING_NUMBER_OF_REPLICAS) != null) {
                     throw new IllegalArgumentException(String.format(Locale.ROOT,
                             "Can't update [%s] on closed indices [%s] - can leave index in an unopenable state", IndexMetaData.SETTING_NUMBER_OF_REPLICAS,
                             closeIndices
                     ));
                 }
-                if (!removedSettings.isEmpty() && !openIndices.isEmpty()) {
+                if (!skippedSettigns.getAsMap().isEmpty() && !openIndices.isEmpty()) {
                    throw new IllegalArgumentException(String.format(Locale.ROOT,
                             "Can't update non dynamic settings[%s] for open indices [%s]",
-                            removedSettings,
+                            skippedSettigns.getAsMap().keySet(),
                             openIndices
                     ));
                 }
@@ -267,57 +226,37 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
                 }
 
                 ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
-                Boolean updatedReadOnly = openSettings.getAsBoolean(IndexMetaData.SETTING_READ_ONLY, null);
-                if (updatedReadOnly != null) {
-                    for (String index : actualIndices) {
-                        if (updatedReadOnly) {
-                            blocks.addIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK);
-                        } else {
-                            blocks.removeIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK);
-                        }
-                    }
-                }
-                Boolean updateMetaDataBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, null);
-                if (updateMetaDataBlock != null) {
-                    for (String index : actualIndices) {
-                        if (updateMetaDataBlock) {
-                            blocks.addIndexBlock(index, IndexMetaData.INDEX_METADATA_BLOCK);
-                        } else {
-                            blocks.removeIndexBlock(index, IndexMetaData.INDEX_METADATA_BLOCK);
-                        }
-                    }
-                }
-
-                Boolean updateWriteBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, null);
-                if (updateWriteBlock != null) {
-                    for (String index : actualIndices) {
-                        if (updateWriteBlock) {
-                            blocks.addIndexBlock(index, IndexMetaData.INDEX_WRITE_BLOCK);
-                        } else {
-                            blocks.removeIndexBlock(index, IndexMetaData.INDEX_WRITE_BLOCK);
-                        }
-                    }
-                }
-
-                Boolean updateReadBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, null);
-                if (updateReadBlock != null) {
-                    for (String index : actualIndices) {
-                        if (updateReadBlock) {
-                            blocks.addIndexBlock(index, IndexMetaData.INDEX_READ_BLOCK);
-                        } else {
-                            blocks.removeIndexBlock(index, IndexMetaData.INDEX_READ_BLOCK);
-                        }
-                    }
-                }
+                maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_BLOCK, IndexMetaData.INDEX_READ_ONLY_SETTING, openSettings);
+                maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_METADATA_BLOCK, IndexMetaData.INDEX_BLOCKS_METADATA_SETTING, openSettings);
+                maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_WRITE_BLOCK, IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, openSettings);
+                maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings);
 
                 if (!openIndices.isEmpty()) {
-                    String[] indices = openIndices.toArray(new String[openIndices.size()]);
-                    metaDataBuilder.updateSettings(openSettings, indices);
+                    for (String index : openIndices) {
+                        IndexMetaData indexMetaData = metaDataBuilder.get(index);
+                        if (indexMetaData == null) {
+                            throw new IndexNotFoundException(index);
+                        }
+                        Settings.Builder updates = Settings.builder();
+                        Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
+                        if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index)) {
+                            metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings));
+                        }
+                    }
                 }
 
                 if (!closeIndices.isEmpty()) {
-                    String[] indices = closeIndices.toArray(new String[closeIndices.size()]);
-                    metaDataBuilder.updateSettings(closeSettings, indices);
+                    for (String index : closeIndices) {
+                        IndexMetaData indexMetaData = metaDataBuilder.get(index);
+                        if (indexMetaData == null) {
+                            throw new IndexNotFoundException(index);
+                        }
+                        Settings.Builder updates = Settings.builder();
+                        Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
+                        if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index)) {
+                            metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings));
+                        }
+                    }
                 }
@@ -326,12 +265,34 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
                 // now, reroute in case things change that require it (like number of replicas)
                 RoutingAllocation.Result routingResult = allocationService.reroute(updatedState, "settings update");
                 updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
 
+                for (String index : openIndices) {
+                    indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings());
+                }
+                for (String index : closeIndices) {
+                    indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings());
+                }
                 return updatedState;
             }
         });
     }
 
     /**
     * Updates the cluster block only iff the setting exists in the given settings
|
||||
*/
|
||||
private static void maybeUpdateClusterBlock(String[] actualIndices, ClusterBlocks.Builder blocks, ClusterBlock block, Setting<Boolean> setting, Settings openSettings) {
|
||||
if (setting.exists(openSettings)) {
|
||||
final boolean updateReadBlock = setting.get(openSettings);
|
||||
for (String index : actualIndices) {
|
||||
if (updateReadBlock) {
|
||||
blocks.addIndexBlock(index, block);
|
||||
} else {
|
||||
blocks.removeIndexBlock(index, block);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
|
||||
|
||||
|
||||
|
|
|
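Note: a minimal sketch of how the open/closed split above behaves from the client side. The index name, the settings values, and the exact client calls are illustrative assumptions for this branch's Java client, not part of this change:

// dynamic settings may be applied while the index is open
client.admin().indices().prepareUpdateSettings("test") // "test" is a hypothetical index
        .setSettings(Settings.settingsBuilder().put("index.number_of_replicas", 2))
        .get();
// non-dynamic settings are rejected for open indices; close the index first
client.admin().indices().prepareClose("test").get();
client.admin().indices().prepareUpdateSettings("test")
        .setSettings(Settings.settingsBuilder().put("index.codec", "best_compression"))
        .get();
client.admin().indices().prepareOpen("test").get();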
@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;

@ -41,10 +42,10 @@ import java.io.IOException;
public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {

public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime");

public static final String INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = "index.unassigned.node_left.delayed_timeout";
private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1);

public static final Setting<TimeValue> INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, true, Setting.Scope.INDEX);

/**
* Reason why the shard is in unassigned state.
* <p>

@ -215,7 +216,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
if (reason != Reason.NODE_LEFT) {
return 0;
}
TimeValue delayTimeout = indexSettings.getAsTime(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, settings.getAsTime(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, DEFAULT_DELAYED_NODE_LEFT_TIMEOUT));
TimeValue delayTimeout = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings, settings);
return Math.max(0L, delayTimeout.nanos());
}
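Note: the new Setting#get(Settings, Settings) overload (added in Setting.java further down) replaces the nested getAsTime fallback. A sketch under the signatures shown in this diff:

Settings nodeSettings = Settings.settingsBuilder()
        .put("index.unassigned.node_left.delayed_timeout", "5m").build();
// the index-level value wins when present; here it falls through to the node level -> 5m
TimeValue timeout = UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(Settings.EMPTY, nodeSettings);
// with both settings objects empty, the 1m default applies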
@ -32,7 +32,7 @@ import java.util.Locale;

/**
* This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} /
* {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}.
* {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link #INDEX_ROUTING_REBALANCE_ENABLE_SETTING}.
* The per index setting overrides the cluster wide setting.
*
* <p>

@ -61,10 +61,10 @@ public class EnableAllocationDecider extends AllocationDecider {
public static final String NAME = "enable";

public static final Setting<Allocation> CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER);
public static final String INDEX_ROUTING_ALLOCATION_ENABLE = "index.routing.allocation.enable";
public static final Setting<Allocation> INDEX_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.INDEX);

public static final Setting<Rebalance> CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER);
public static final String INDEX_ROUTING_REBALANCE_ENABLE = "index.routing.rebalance.enable";
public static final Setting<Rebalance> INDEX_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.INDEX);

private volatile Rebalance enableRebalance;
private volatile Allocation enableAllocation;

@ -92,11 +92,10 @@ public class EnableAllocationDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored");
}

IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
String enableIndexValue = indexMetaData.getSettings().get(INDEX_ROUTING_ALLOCATION_ENABLE);
final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
final Allocation enable;
if (enableIndexValue != null) {
enable = Allocation.parse(enableIndexValue);
if (INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.exists(indexMetaData.getSettings())) {
enable = INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.get(indexMetaData.getSettings());
} else {
enable = this.enableAllocation;
}

@ -129,10 +128,9 @@ public class EnableAllocationDecider extends AllocationDecider {
}

Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings();
String enableIndexValue = indexSettings.get(INDEX_ROUTING_REBALANCE_ENABLE);
final Rebalance enable;
if (enableIndexValue != null) {
enable = Rebalance.parse(enableIndexValue);
if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) {
enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings);
} else {
enable = this.enableRebalance;
}

@ -160,7 +158,7 @@ public class EnableAllocationDecider extends AllocationDecider {

/**
* Allocation values or rather their string representation to be used with
* {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE}
* {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE_SETTING}
* via cluster / index settings.
*/
public enum Allocation {

@ -186,7 +184,7 @@ public class EnableAllocationDecider extends AllocationDecider {

/**
* Rebalance values or rather their string representation to be used with
* {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE}
* {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE_SETTING}
* via cluster / index settings.
*/
public enum Rebalance {
@ -60,10 +60,6 @@ public class FilterAllocationDecider extends AllocationDecider {

public static final String NAME = "filter";

public static final String INDEX_ROUTING_REQUIRE_GROUP = "index.routing.allocation.require.";
public static final String INDEX_ROUTING_INCLUDE_GROUP = "index.routing.allocation.include.";
public static final String INDEX_ROUTING_EXCLUDE_GROUP = "index.routing.allocation.exclude.";

public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER);
public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER);
public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER);
@ -32,12 +32,12 @@ import org.elasticsearch.common.settings.Settings;
/**
* This {@link AllocationDecider} limits the number of shards per node on a per
* index or node-wide basis. The allocator prevents a single node from holding more
* than {@value #INDEX_TOTAL_SHARDS_PER_NODE} per index and
* than <tt>index.routing.allocation.total_shards_per_node</tt> per index and
* <tt>cluster.routing.allocation.total_shards_per_node</tt> globally during the allocation
* process. The limits of this decider can be changed in real-time via the
* index settings API.
* <p>
* If {@value #INDEX_TOTAL_SHARDS_PER_NODE} is reset to a negative value shards
* If <tt>index.routing.allocation.total_shards_per_node</tt> is reset to a negative value shards
* per index are unlimited per node. Shards currently in the
* {@link ShardRoutingState#RELOCATING relocating} state are ignored by this
* {@link AllocationDecider} until the shard changes its state to either

@ -59,12 +59,13 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
* Controls the maximum number of shards per index on a single Elasticsearch
* node. Negative values are interpreted as unlimited.
*/
public static final String INDEX_TOTAL_SHARDS_PER_NODE = "index.routing.allocation.total_shards_per_node";
public static final Setting<Integer> INDEX_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.INDEX);

/**
* Controls the maximum number of shards per node on a global level.
* Negative values are interpreted as unlimited.
*/
public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, true, Setting.Scope.CLUSTER);
public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.CLUSTER);

@Inject

@ -81,7 +82,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
int indexShardLimit = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
// Capture the limit here in case it changes during this method's
// execution
final int clusterShardLimit = this.clusterShardLimit;

@ -118,7 +119,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
@Override
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
int indexShardLimit = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
// Capture the limit here in case it changes during this method's
// execution
final int clusterShardLimit = this.clusterShardLimit;
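Note: Setting.intSetting(key, defaultValue, minValue, dynamic, scope) enforces the lower bound at parse time, so the -1/-1 pair above reads as "default unlimited, nothing below -1 accepted". A sketch:

Setting<Integer> limit = ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING;
limit.get(Settings.settingsBuilder().put(limit.getKey(), 2).build());  // -> 2
limit.get(Settings.settingsBuilder().put(limit.getKey(), -5).build()); // -> IllegalArgumentException, -5 < -1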
@ -1,74 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.settings;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.regex.Regex;

/**
* A container for setting names and validation methods for those settings.
*/
public class DynamicSettings {
private final ImmutableOpenMap<String, Validator> dynamicSettings;

public static class Builder {
private ImmutableOpenMap.Builder<String, Validator> settings = ImmutableOpenMap.builder();

public void addSetting(String setting, Validator validator) {
Validator old = settings.put(setting, validator);
if (old != null) {
throw new IllegalArgumentException("Cannot register setting [" + setting + "] twice");
}
}

public DynamicSettings build() {
return new DynamicSettings(settings.build());
}
}

private DynamicSettings(ImmutableOpenMap<String, Validator> settings) {
this.dynamicSettings = settings;
}

public boolean isDynamicOrLoggingSetting(String key) {
return hasDynamicSetting(key) || key.startsWith("logger.");
}

public boolean hasDynamicSetting(String key) {
for (ObjectCursor<String> dynamicSetting : dynamicSettings.keys()) {
if (Regex.simpleMatch(dynamicSetting.value, key)) {
return true;
}
}
return false;
}

public String validateDynamicSetting(String dynamicSetting, String value, ClusterState clusterState) {
for (ObjectObjectCursor<String, Validator> setting : dynamicSettings) {
if (Regex.simpleMatch(setting.key, dynamicSetting)) {
return setting.value.validate(dynamicSetting, value, clusterState);
}
}
return null;
}
}
@ -1,307 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.settings;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue;
import static org.elasticsearch.common.unit.MemorySizeValue.parseBytesSizeValueOrHeapRatio;

/**
* Validates a setting, returning a failure message if applicable.
*/
public interface Validator {

String validate(String setting, String value, ClusterState clusterState);

Validator EMPTY = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
return null;
}
};

Validator TIME = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
if (value == null) {
throw new NullPointerException("value must not be null");
}
try {
// This never returns null:
TimeValue.parseTimeValue(value, null, setting);
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

Validator TIMEOUT = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (value == null) {
throw new NullPointerException("value must not be null");
}
TimeValue timeValue = TimeValue.parseTimeValue(value, null, setting);
assert timeValue != null;
if (timeValue.millis() < 0 && timeValue.millis() != -1) {
return "cannot parse value [" + value + "] as a timeout";
}
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

Validator TIME_NON_NEGATIVE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (value == null) {
throw new NullPointerException("value must not be null");
}
TimeValue timeValue = TimeValue.parseTimeValue(value, null, setting);
assert timeValue != null;
if (timeValue.millis() < 0) {
return "cannot parse value [" + value + "] as non negative time";
}
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

Validator FLOAT = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
Float.parseFloat(value);
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a float";
}
return null;
}
};

Validator NON_NEGATIVE_FLOAT = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Float.parseFloat(value) < 0.0) {
return "the value of the setting " + setting + " must be a non negative float";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
};

Validator DOUBLE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
Double.parseDouble(value);
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
};

Validator NON_NEGATIVE_DOUBLE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Double.parseDouble(value) < 0.0) {
return "the value of the setting " + setting + " must be a non negative double";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
};

Validator DOUBLE_GTE_2 = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Double.parseDouble(value) < 2.0) {
return "the value of the setting " + setting + " must be >= 2.0";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
};

Validator INTEGER = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
Integer.parseInt(value);
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};

Validator POSITIVE_INTEGER = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Integer.parseInt(value) <= 0) {
return "the value of the setting " + setting + " must be a positive integer";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};

Validator NON_NEGATIVE_INTEGER = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Integer.parseInt(value) < 0) {
return "the value of the setting " + setting + " must be a non negative integer";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};

Validator INTEGER_GTE_2 = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (Integer.parseInt(value) < 2) {
return "the value of the setting " + setting + " must be >= 2";
}
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as an integer";
}
return null;
}
};

Validator BYTES_SIZE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
parseBytesSizeValue(value, setting);
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

Validator POSITIVE_BYTES_SIZE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState state) {
try {
ByteSizeValue byteSizeValue = parseBytesSizeValue(value, setting);
if (byteSizeValue.getBytes() <= 0) {
return setting + " must be a positive byte size value";
}
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

Validator PERCENTAGE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
if (value == null) {
return "the value of " + setting + " can not be null";
}
if (!value.endsWith("%")) {
return "the value [" + value + "] for " + setting + " must end with %";
}
final double asDouble = Double.parseDouble(value.substring(0, value.length() - 1));
if (asDouble < 0.0 || asDouble > 100.0) {
return "the value [" + value + "] for " + setting + " must be a percentage between 0% and 100%";
}
} catch (NumberFormatException ex) {
return ex.getMessage();
}
return null;
}
};

Validator BYTES_SIZE_OR_PERCENTAGE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
String byteSize = BYTES_SIZE.validate(setting, value, clusterState);
if (byteSize != null) {
String percentage = PERCENTAGE.validate(setting, value, clusterState);
if (percentage == null) {
return null;
}
return percentage + " or be a valid bytes size value, like [16mb]";
}
return null;
}
};

Validator MEMORY_SIZE = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
parseBytesSizeValueOrHeapRatio(value, setting);
} catch (ElasticsearchParseException ex) {
return ex.getMessage();
}
return null;
}
};

public static final Validator BOOLEAN = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
if (value != null && (Booleans.isExplicitFalse(value) || Booleans.isExplicitTrue(value))) {
return null;
}
return "cannot parse value [" + value + "] as a boolean";
}
};
}
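Note: the two files deleted above are the old string-based validation layer. Those checks now live in Setting's typed parsers, so a rule like Validator.POSITIVE_INTEGER collapses into a one-line declaration; a minimal sketch (the key is hypothetical):

Setting<Integer> retries = Setting.intSetting("cluster.example.retries", 1, 1, true, Setting.Scope.CLUSTER);
retries.get(Settings.settingsBuilder().put("cluster.example.retries", 0).build());
// -> IllegalArgumentException: Failed to parse value [0] for setting [cluster.example.retries] must be >= 1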
@ -21,9 +21,13 @@ package org.elasticsearch.common.settings;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.util.set.Sets;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

@ -43,17 +47,31 @@ public abstract class AbstractScopedSettings extends AbstractComponent {

protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet, Setting.Scope scope) {
super(settings);
for (Setting<?> entry : settingsSet) {
if (entry.getScope() != scope) {
throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope());
}
if (entry.hasComplexMatcher()) {
complexMatchers.put(entry.getKey(), entry);
} else {
keySettings.put(entry.getKey(), entry);
}
}
this.lastSettingsApplied = Settings.EMPTY;
this.scope = scope;
for (Setting<?> entry : settingsSet) {
addSetting(entry);
}
}

protected AbstractScopedSettings(Settings nodeSettings, Settings scopeSettings, AbstractScopedSettings other) {
super(nodeSettings);
this.lastSettingsApplied = scopeSettings;
this.scope = other.scope;
complexMatchers.putAll(other.complexMatchers);
keySettings.putAll(other.keySettings);
settingUpdaters.addAll(other.settingUpdaters);
}

protected final void addSetting(Setting<?> setting) {
if (setting.getScope() != scope) {
throw new IllegalArgumentException("Setting must be a " + scope + " setting but was: " + setting.getScope());
}
if (setting.hasComplexMatcher()) {
complexMatchers.putIfAbsent(setting.getKey(), setting);
} else {
keySettings.putIfAbsent(setting.getKey(), setting);
}
}

public Setting.Scope getScope() {

@ -161,6 +179,34 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
addSettingsUpdateConsumer(setting, consumer, (s) -> {});
}

/**
* Validates that all settings in the builder are registered and valid
*/
public final void validate(Settings.Builder settingsBuilder) {
validate(settingsBuilder.build());
}

/**
* Validates that all given settings are registered and valid
*/
public final void validate(Settings settings) {
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
validate(entry.getKey(), settings);
}
}

/**
* Validates that the setting is valid
*/
public final void validate(String key, Settings settings) {
Setting setting = get(key);
if (setting == null) {
throw new IllegalArgumentException("unknown setting [" + key + "]");
}
setting.get(settings);
}

/**
* Transactional interface to update settings.
* @see Setting

@ -253,4 +299,93 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
return builder.build();
}

/**
* Returns the value for the given setting.
*/
public <T> T get(Setting<T> setting) {
if (setting.getScope() != scope) {
throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] != [" + setting.getScope() + "]");
}
if (get(setting.getKey()) == null) {
throw new IllegalArgumentException("setting " + setting.getKey() + " has not been registered");
}
return setting.get(this.lastSettingsApplied, settings);
}

/**
* Updates a target settings builder with new, updated or deleted settings from a given settings builder.
* <p>
* Note: This method will only allow updates to dynamic settings. If a non-dynamic setting is updated an {@link IllegalArgumentException} is thrown instead.
* </p>
* @param toApply the new settings to apply
* @param target the target settings builder that the updates are applied to. All keys that have explicit null value in toApply will be removed from this builder
* @param updates a settings builder that holds all updates applied to target
* @param type a free text string to allow better exception messages
* @return <code>true</code> if the target has changed otherwise <code>false</code>
*/
public boolean updateDynamicSettings(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
return updateSettings(toApply, target, updates, type, true);
}

/**
* Updates a target settings builder with new, updated or deleted settings from a given settings builder.
* @param toApply the new settings to apply
* @param target the target settings builder that the updates are applied to. All keys that have explicit null value in toApply will be removed from this builder
* @param updates a settings builder that holds all updates applied to target
* @param type a free text string to allow better exception messages
* @return <code>true</code> if the target has changed otherwise <code>false</code>
*/
public boolean updateSettings(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
return updateSettings(toApply, target, updates, type, false);
}

/**
* Updates a target settings builder with new, updated or deleted settings from a given settings builder.
* @param toApply the new settings to apply
* @param target the target settings builder that the updates are applied to. All keys that have explicit null value in toApply will be removed from this builder
* @param updates a settings builder that holds all updates applied to target
* @param type a free text string to allow better exception messages
* @param onlyDynamic if <code>false</code> all settings are updated otherwise only dynamic settings are updated. If set to <code>true</code> and a non-dynamic setting is updated an exception is thrown.
* @return <code>true</code> if the target has changed otherwise <code>false</code>
*/
private boolean updateSettings(Settings toApply, Settings.Builder target, Settings.Builder updates, String type, boolean onlyDynamic) {
boolean changed = false;
final Set<String> toRemove = new HashSet<>();
Settings.Builder settingsBuilder = Settings.settingsBuilder();
for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
if (entry.getValue() == null) {
toRemove.add(entry.getKey());
} else if ((onlyDynamic == false && get(entry.getKey()) != null) || hasDynamicSetting(entry.getKey())) {
validate(entry.getKey(), toApply);
settingsBuilder.put(entry.getKey(), entry.getValue());
updates.put(entry.getKey(), entry.getValue());
changed = true;
} else {
throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
}
}
changed |= applyDeletes(toRemove, target);
target.put(settingsBuilder.build());
return changed;
}

private static final boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
boolean changed = false;
for (String entry : deletes) {
Set<String> keysToRemove = new HashSet<>();
Set<String> keySet = builder.internalMap().keySet();
for (String key : keySet) {
if (Regex.simpleMatch(entry, key)) {
keysToRemove.add(key);
}
}
for (String key : keysToRemove) {
builder.remove(key);
changed = true;
}
}
return changed;
}

}
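Note: a sketch of the two update flavors defined above; it assumes an IndexScopedSettings instance named indexScopedSettings is at hand, and that an explicitly null value in a Settings.Builder survives into getAsMap(). The explicit null deletes the key from the target, which is the PR's "reset by passing null" behavior:

Settings.Builder target = Settings.settingsBuilder().put("index.refresh_interval", "1s");
Settings.Builder updates = Settings.settingsBuilder();
Settings toApply = Settings.settingsBuilder()
        .put("index.number_of_replicas", 2)
        .put("index.refresh_interval", (String) null) // explicit null -> removed from target
        .build();
boolean changed = indexScopedSettings.updateDynamicSettings(toApply, target, updates, "index");
// changed == true; a non-dynamic key in toApply would throw "not dynamically updateable",
// while updateSettings(...) would accept any registered setting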
@ -38,6 +38,8 @@ import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.recovery.RecoverySettings;

@ -62,7 +64,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
super(settings, settingsSet, Setting.Scope.CLUSTER);
}

@Override
public synchronized Settings applySettings(Settings newSettings) {
Settings settings = super.applySettings(newSettings);

@ -83,6 +84,11 @@ public final class ClusterSettings extends AbstractScopedSettings {
return settings;
}

@Override
public boolean hasDynamicSetting(String key) {
return isLoggerSetting(key) || super.hasDynamicSetting(key);
}

/**
* Returns <code>true</code> if the setting is a logger setting.
*/

@ -149,5 +155,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_PROFILES_SETTING,
Transport.TRANSPORT_TCP_COMPRESS)));
Transport.TRANSPORT_TCP_COMPRESS,
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING)));
}
@ -0,0 +1,155 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.settings;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexingSlowLog;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.SearchSlowLog;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.search.SearchService;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;

/**
* Encapsulates all valid index level settings.
* @see org.elasticsearch.common.settings.Setting.Scope#INDEX
*/
public final class IndexScopedSettings extends AbstractScopedSettings {

public static final Predicate<String> INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetaData.INDEX_SETTING_PREFIX);

public static Set<Setting<?>> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
IndexSettings.INDEX_TTL_DISABLE_PURGE_SETTING,
IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING,
IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
MergeSchedulerConfig.AUTO_THROTTLE_SETTING,
MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING,
MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING,
IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING,
IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING,
IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING,
IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING,
IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING,
IndexMetaData.INDEX_SHADOW_REPLICAS_SETTING,
IndexMetaData.INDEX_SHARED_FILESYSTEM_SETTING,
IndexMetaData.INDEX_READ_ONLY_SETTING,
IndexMetaData.INDEX_BLOCKS_READ_SETTING,
IndexMetaData.INDEX_BLOCKS_WRITE_SETTING,
IndexMetaData.INDEX_BLOCKS_METADATA_SETTING,
IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING,
IndexMetaData.INDEX_PRIORITY_SETTING,
IndexMetaData.INDEX_DATA_PATH_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_REFORMAT,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING,
MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING,
MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING,
IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING,
IndexSettings.INDEX_WARMER_ENABLED_SETTING,
IndexSettings.INDEX_REFRESH_INTERVAL_SETTING,
IndexSettings.MAX_RESULT_WINDOW_SETTING,
IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING,
IndexSettings.DEFAULT_FIELD_SETTING,
IndexSettings.QUERY_STRING_LENIENT_SETTING,
IndexSettings.ALLOW_UNMAPPED,
IndexSettings.INDEX_CHECK_ON_STARTUP,
ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING,
IndexSettings.INDEX_GC_DELETES_SETTING,
IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING,
UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING,
EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING,
EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING,
IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTTING,
IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY,
FieldMapper.IGNORE_MALFORMED_SETTING,
FieldMapper.COERCE_SETTING,
Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING,
PercolatorQueriesRegistry.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING,
MapperService.INDEX_MAPPER_DYNAMIC_SETTING,
MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING,
BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING,
IndexModule.INDEX_STORE_TYPE_SETTING,
IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING,
IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING,
PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING,
FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
EngineConfig.INDEX_CODEC_SETTING,
SearchService.INDEX_NORMS_LOADING_SETTING,
// this sucks but we can't really validate all the analyzers/similarity in here
Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX), // this allows similarity settings to be passed
Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed
)));

public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);

public IndexScopedSettings(Settings settings, Set<Setting<?>> settingsSet) {
super(settings, settingsSet, Setting.Scope.INDEX);
}

private IndexScopedSettings(Settings settings, IndexScopedSettings other, IndexMetaData metaData) {
super(settings, metaData.getSettings(), other);
}

public IndexScopedSettings copy(Settings settings, IndexMetaData metaData) {
return new IndexScopedSettings(settings, this, metaData);
}
}
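Note: with every index setting registered up front, a plugin that introduces its own index.* key has to register it too; a sketch of the registration hook assumed from this branch's plugin API (class, key, and default are hypothetical):

public class MyPlugin extends Plugin { // hypothetical plugin
    public static final Setting<Integer> MAX_WIDGETS_SETTING =
            Setting.intSetting("index.my_plugin.max_widgets", 10, 0, true, Setting.Scope.INDEX); // hypothetical key

    public void onModule(SettingsModule settingsModule) {
        settingsModule.registerSetting(MAX_WIDGETS_SETTING); // unregistered index.* keys now fail validation
    }
}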
@ -36,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;

@ -167,6 +168,16 @@ public class Setting<T> extends ToXContentToBytes {
return builder;
}

/**
* Returns the value for this setting but falls back to the second provided settings object
*/
public final T get(Settings primary, Settings secondary) {
if (exists(primary)) {
return get(primary);
}
return get(secondary);
}

/**
* The settings scope - settings can either be cluster settings or per index settings.
*/

@ -225,7 +236,7 @@ public class Setting<T> extends ToXContentToBytes {
}

private class Updater implements AbstractScopedSettings.SettingUpdater<T> {
private final class Updater implements AbstractScopedSettings.SettingUpdater<T> {
private final Consumer<T> consumer;
private final ESLogger logger;
private final Consumer<T> accept;

@ -265,8 +276,8 @@ public class Setting<T> extends ToXContentToBytes {
}

@Override
public void apply(T value, Settings current, Settings previous) {
logger.info("update [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current));
public final void apply(T value, Settings current, Settings previous) {
logger.info("updating [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current));
consumer.accept(value);
}
}

@ -294,6 +305,10 @@ public class Setting<T> extends ToXContentToBytes {
return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope);
}

public static Setting<Long> longSetting(String key, long defaultValue, long minValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), dynamic, scope);
}

public static int parseInt(String s, int minValue, String key) {
int value = Integer.parseInt(s);
if (value < minValue) {

@ -302,6 +317,14 @@ public class Setting<T> extends ToXContentToBytes {
return value;
}

public static long parseLong(String s, long minValue, String key) {
long value = Long.parseLong(s);
if (value < minValue) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
return value;
}

public static Setting<Integer> intSetting(String key, int defaultValue, boolean dynamic, Scope scope) {
return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope);
}

@ -430,6 +453,7 @@ public class Setting<T> extends ToXContentToBytes {

@Override
public void apply(Settings value, Settings current, Settings previous) {
logger.info("updating [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current));
consumer.accept(value);
}

@ -470,4 +494,16 @@ public class Setting<T> extends ToXContentToBytes {
}, dynamic, scope);
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Setting<?> setting = (Setting<?>) o;
return Objects.equals(key, setting.key);
}

@Override
public int hashCode() {
return Objects.hash(key);
}
}
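Note: equals/hashCode above identify a Setting purely by its key, so two instances registered under the same key collapse to a single entry in a set; a sketch (the key is hypothetical):

Setting<Integer> a = Setting.intSetting("index.example.size", 1, true, Setting.Scope.INDEX);
Setting<Integer> b = Setting.intSetting("index.example.size", 2, true, Setting.Scope.INDEX);
a.equals(b); // true - the default value and parser play no part in identity
new HashSet<>(Arrays.asList(a, b)).size(); // 1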
@ -58,6 +58,7 @@ import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

@ -212,6 +213,19 @@ public final class Settings implements ToXContent {
return builder.build();
}

/**
* Returns a new settings object that contains all settings of the current one filtered by the given settings key predicate.
*/
public Settings filter(Predicate<String> predicate) {
Builder builder = new Builder();
for (Map.Entry<String, String> entry : getAsMap().entrySet()) {
if (predicate.test(entry.getKey())) {
builder.put(entry.getKey(), entry.getValue());
}
}
return builder.build();
}

/**
* Returns the settings mapped to the given setting name.
*/
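Note: filter(...) is what SettingsModule (next section) uses to carve the index.* subset out of the node settings for startup validation; a sketch:

Settings node = Settings.settingsBuilder()
        .put("cluster.name", "prod")
        .put("index.refresh_interval", "5s")
        .build();
Settings indexOnly = node.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE);
// indexOnly holds only index.refresh_interval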
@ -34,8 +34,8 @@ public class SettingsModule extends AbstractModule {

private final Settings settings;
private final SettingsFilter settingsFilter;
private final Map<String, Setting<?>> clusterDynamicSettings = new HashMap<>();
private final Map<String, Setting<?>> clusterSettings = new HashMap<>();
private final Map<String, Setting<?>> indexSettings = new HashMap<>();

public SettingsModule(Settings settings, SettingsFilter settingsFilter) {
this.settings = settings;

@ -43,26 +43,38 @@ public class SettingsModule extends AbstractModule {
for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) {
registerSetting(setting);
}
for (Setting<?> setting : IndexScopedSettings.BUILT_IN_INDEX_SETTINGS) {
registerSetting(setting);
}
}

@Override
protected void configure() {
final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values()));
// by now we are fully configured, let's check node level settings for unregistered index settings
indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE));
bind(Settings.class).toInstance(settings);
bind(SettingsFilter.class).toInstance(settingsFilter);
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(clusterDynamicSettings.values()));
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.clusterSettings.values()));

bind(ClusterSettings.class).toInstance(clusterSettings);
bind(IndexScopedSettings.class).toInstance(indexScopedSettings);
}

public void registerSetting(Setting<?> setting) {
switch (setting.getScope()) {
case CLUSTER:
if (clusterDynamicSettings.containsKey(setting.getKey())) {
if (clusterSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
}
clusterDynamicSettings.put(setting.getKey(), setting);
clusterSettings.put(setting.getKey(), setting);
break;
case INDEX:
throw new UnsupportedOperationException("not yet implemented");
if (indexSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
}
indexSettings.put(setting.getKey(), setting);
break;
}
}
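Note: the net effect of the validate call in configure() above is that a node whose configuration carries an unknown or misspelled index.* key now fails fast. A sketch using the default registry:

IndexScopedSettings validator = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
validator.validate(Settings.settingsBuilder().put("index.refresh_intervall", "5s").build()); // hypothetical typo
// -> IllegalArgumentException: unknown setting [index.refresh_intervall]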
@ -20,12 +20,10 @@
package org.elasticsearch.common.unit;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;

import java.io.IOException;
import java.util.Locale;

@ -176,7 +174,6 @@ public class ByteSizeValue implements Streamable {

public static ByteSizeValue parseBytesSizeValue(String sValue, ByteSizeValue defaultValue, String settingName) throws ElasticsearchParseException {
settingName = Objects.requireNonNull(settingName);
assert settingName.startsWith("index.") == false || MetaDataIndexUpgradeService.INDEX_BYTES_SIZE_SETTINGS.contains(settingName);
if (sValue == null) {
return defaultValue;
}
@ -20,12 +20,10 @@
package org.elasticsearch.common.unit;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.joda.time.Period;
import org.joda.time.PeriodType;
import org.joda.time.format.PeriodFormat;

@ -254,7 +252,6 @@ public class TimeValue implements Streamable {

public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue, String settingName) {
settingName = Objects.requireNonNull(settingName);
assert settingName.startsWith("index.") == false || MetaDataIndexUpgradeService.INDEX_TIME_SETTINGS.contains(settingName) : settingName;
if (sValue == null) {
return defaultValue;
}
@ -342,7 +342,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
// resolve the directory the shard actually lives in
Path p = shardPaths[i].resolve("index");
// open a directory (will be immediately closed) on the shard's location
dirs[i] = new SimpleFSDirectory(p, FsDirectoryService.buildLockFactory(indexSettings));
dirs[i] = new SimpleFSDirectory(p, indexSettings.getValue(FsDirectoryService.INDEX_LOCK_FACTOR_SETTING));
// create a lock for the "write.lock" file
try {
locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME);
@ -30,6 +30,7 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;

@ -40,6 +41,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

/**

@ -48,15 +50,30 @@ import java.util.stream.Collectors;
 */
public abstract class PrimaryShardAllocator extends AbstractComponent {

@Deprecated
public static final String INDEX_RECOVERY_INITIAL_SHARDS = "index.recovery.initial_shards";
private static final Function<String, String> INITIAL_SHARDS_PARSER = (value) -> {
switch (value) {
case "quorum":
case "quorum-1":
case "half":
case "one":
case "full":
case "full-1":
case "all-1":
case "all":
return value;
default:
Integer.parseInt(value); // we only care that it can be parsed; the raw string is what we keep
return value;
}
};

private final String initialShards;
public static final Setting<String> NODE_INITIAL_SHARDS_SETTING = new Setting<>("gateway.initial_shards", (settings) -> settings.get("gateway.local.initial_shards", "quorum"), INITIAL_SHARDS_PARSER, true, Setting.Scope.CLUSTER);
@Deprecated
public static final Setting<String> INDEX_RECOVERY_INITIAL_SHARDS_SETTING = new Setting<>("index.recovery.initial_shards", (settings) -> NODE_INITIAL_SHARDS_SETTING.get(settings), INITIAL_SHARDS_PARSER, true, Setting.Scope.INDEX);

public PrimaryShardAllocator(Settings settings) {
super(settings);
this.initialShards = settings.get("gateway.initial_shards", settings.get("gateway.local.initial_shards", "quorum"));
logger.debug("using initial_shards [{}]", initialShards);
logger.debug("using initial_shards [{}]", NODE_INITIAL_SHARDS_SETTING.get(settings));
}

public boolean allocateUnassigned(RoutingAllocation allocation) {

@ -73,7 +90,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
}

final IndexMetaData indexMetaData = metaData.index(shard.getIndex());
final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings, Collections.emptyList());
final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);

if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
// when we create a fresh index

@ -209,8 +226,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
// check if the count meets the minimum set
int requiredAllocation = 1;
// if we restore from a repository one copy is more than enough
try {
String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards));
String initialShards = INDEX_RECOVERY_INITIAL_SHARDS_SETTING.get(indexMetaData.getSettings(), settings);
if ("quorum".equals(initialShards)) {
if (indexMetaData.getNumberOfReplicas() > 1) {
requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1;

@ -230,9 +246,6 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
} else {
requiredAllocation = Integer.parseInt(initialShards);
}
} catch (Exception e) {
logger.warn("[{}][{}] failed to derive initial_shards from value {}, ignore allocation for {}", shard.index(), shard.id(), initialShards, shard);
}

return nodesAndVersions.allocationsFound >= requiredAllocation;
}

@ -336,7 +349,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
 */
private boolean recoverOnAnyNode(IndexSettings indexSettings) {
return indexSettings.isOnSharedFilesystem()
&& indexSettings.getSettings().getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false);
&& IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING.get(indexSettings.getSettings());
}

protected abstract AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
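The layered defaults above are worth spelling out: the deprecated index-scoped setting falls back to the node-scoped one, which itself falls back to the legacy "gateway.local.initial_shards" key and finally to "quorum". A small sketch of how the two-argument get resolves, using only API visible in this diff; the concrete values are illustrative.

// node level: the new key is set, so it wins over the legacy fallback chain
Settings nodeSettings = Settings.settingsBuilder()
    .put("gateway.initial_shards", "full")
    .build();
// no index-level override for index.recovery.initial_shards
Settings indexLevel = Settings.EMPTY;

// the index-scoped lookup falls through to the node-scoped setting -> "full"
String initialShards =
    PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING.get(indexLevel, nodeSettings);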
@ -56,7 +56,7 @@ public abstract class PriorityComparator implements Comparator<ShardRouting> {
}

private int priority(Settings settings) {
return settings.getAsInt(IndexMetaData.SETTING_PRIORITY, 1);
return IndexMetaData.INDEX_PRIORITY_SETTING.get(settings);
}

private long timeCreated(Settings settings) {
@ -130,7 +130,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
if (metaData != null) {
ShardPath shardPath = null;
try {
IndexSettings indexSettings = new IndexSettings(metaData, settings, Collections.emptyList());
IndexSettings indexSettings = new IndexSettings(metaData, settings);
shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
if (shardPath == null) {
throw new IllegalStateException(shardId + " no shard path found");
@ -20,6 +20,8 @@
package org.elasticsearch.index;

import org.apache.lucene.util.SetOnce;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.analysis.AnalysisRegistry;

@ -35,7 +37,6 @@ import org.elasticsearch.index.similarity.SimilarityProvider;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.mapper.MapperRegistry;

@ -47,6 +48,7 @@ import java.util.Map;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;

/**
 * IndexModule represents the central extension point for index level custom implementations like:

@ -57,25 +59,24 @@ import java.util.function.Consumer;
 * <tt>"index.similarity.my_similarity.type" : "BM25"</tt> can be used.</li>
 * <li>{@link IndexStore} - Custom {@link IndexStore} instances can be registered via {@link #addIndexStore(String, BiFunction)}</li>
 * <li>{@link IndexEventListener} - Custom {@link IndexEventListener} instances can be registered via {@link #addIndexEventListener(IndexEventListener)}</li>
 * <li>Settings update listener - Custom settings update listeners can be registered via {@link #addIndexSettingsListener(Consumer)}</li>
 * <li>Settings update listener - Custom settings update listeners can be registered via {@link #addSettingsUpdateConsumer(Setting, Consumer)}</li>
 * </ul>
 */
public final class IndexModule {

public static final String STORE_TYPE = "index.store.type";
public static final Setting<String> INDEX_STORE_TYPE_SETTING = new Setting<>("index.store.type", "", Function.identity(), false, Setting.Scope.INDEX);
public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity";
public static final String INDEX_QUERY_CACHE = "index";
public static final String NONE_QUERY_CACHE = "none";
public static final String QUERY_CACHE_TYPE = "index.queries.cache.type";
public static final Setting<String> INDEX_QUERY_CACHE_TYPE_SETTING = new Setting<>("index.queries.cache.type", INDEX_QUERY_CACHE, Function.identity(), false, Setting.Scope.INDEX);
// for test purposes only
public static final String QUERY_CACHE_EVERYTHING = "index.queries.cache.everything";
public static final Setting<Boolean> INDEX_QUERY_CACHE_EVERYTHING_SETTING = Setting.boolSetting("index.queries.cache.everything", false, false, Setting.Scope.INDEX);
private final IndexSettings indexSettings;
private final IndexStoreConfig indexStoreConfig;
private final AnalysisRegistry analysisRegistry;
// pkg private so tests can mock
final SetOnce<EngineFactory> engineFactory = new SetOnce<>();
private SetOnce<IndexSearcherWrapperFactory> indexSearcherWrapper = new SetOnce<>();
private final Set<Consumer<Settings>> settingsConsumers = new HashSet<>();
private final Set<IndexEventListener> indexEventListeners = new HashSet<>();
private IndexEventListener listener;
private final Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities = new HashMap<>();

@ -92,17 +93,13 @@ public final class IndexModule {
}

/**
 * Adds a settings consumer for this index
 * Adds a Setting and its consumer for this index.
 */
public void addIndexSettingsListener(Consumer<Settings> listener) {
if (listener == null) {
throw new IllegalArgumentException("listener must not be null");
public <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer) {
if (setting == null) {
throw new IllegalArgumentException("setting must not be null");
}

if (settingsConsumers.contains(listener)) {
throw new IllegalStateException("listener already registered");
}
settingsConsumers.add(listener);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(setting, consumer);
}

/**

@ -245,27 +242,29 @@ public final class IndexModule {

public IndexService newIndexService(NodeEnvironment environment, IndexService.ShardStoreDeleter shardStoreDeleter, NodeServicesProvider servicesProvider, MapperRegistry mapperRegistry,
IndexingOperationListener... listeners) throws IOException {
final IndexSettings settings = indexSettings.newWithListener(settingsConsumers);
IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null ? (shard) -> null : indexSearcherWrapper.get();
IndexEventListener eventListener = freeze();
final String storeType = settings.getSettings().get(STORE_TYPE);
final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING);
final IndexStore store;
if (storeType == null || isBuiltinType(storeType)) {
store = new IndexStore(settings, indexStoreConfig);
if (Strings.isEmpty(storeType) || isBuiltinType(storeType)) {
store = new IndexStore(indexSettings, indexStoreConfig);
} else {
BiFunction<IndexSettings, IndexStoreConfig, IndexStore> factory = storeTypes.get(storeType);
if (factory == null) {
throw new IllegalArgumentException("Unknown store type [" + storeType + "]");
}
store = factory.apply(settings, indexStoreConfig);
store = factory.apply(indexSettings, indexStoreConfig);
if (store == null) {
throw new IllegalStateException("store must not be null");
}
}
final String queryCacheType = settings.getSettings().get(IndexModule.QUERY_CACHE_TYPE, IndexModule.INDEX_QUERY_CACHE);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, store::setMaxRate);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING, store::setType);
final String queryCacheType = indexSettings.getValue(INDEX_QUERY_CACHE_TYPE_SETTING);
final BiFunction<IndexSettings, IndicesQueryCache, QueryCache> queryCacheProvider = queryCaches.get(queryCacheType);
final QueryCache queryCache = queryCacheProvider.apply(settings, servicesProvider.getIndicesQueryCache());
return new IndexService(settings, environment, new SimilarityService(settings, similarities), shardStoreDeleter, analysisRegistry, engineFactory.get(),
final QueryCache queryCache = queryCacheProvider.apply(indexSettings, servicesProvider.getIndicesQueryCache());
return new IndexService(indexSettings, environment, new SimilarityService(indexSettings, similarities), shardStoreDeleter, analysisRegistry, engineFactory.get(),
servicesProvider, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, listeners);
}

}
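A sketch of what the new typed registration looks like from the consumer side of IndexModule. The surrounding plugin hook and logger field are assumptions of this example; addSettingsUpdateConsumer and the warmer setting are the APIs from this diff.

// called once per index as it is created; IndexModule is the extension point above
public void onModule(IndexModule indexModule) {
    // the consumer receives the already-parsed value on every dynamic update;
    // validation happened inside the Setting, so no defensive parsing is needed
    indexModule.addSettingsUpdateConsumer(IndexSettings.INDEX_WARMER_ENABLED_SETTING,
        enabled -> logger.info("index warmers are now [{}]", enabled)); // logger from the enclosing class (assumed)
}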
@ -62,7 +62,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.search.stats.SearchSlowLog;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.IndexShard;

@ -141,7 +140,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
this.engineFactory = engineFactory;
// initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE
this.searcherWrapper = wrapperFactory.newWrapper(this);
this.slowLog = new IndexingSlowLog(indexSettings.getSettings());
this.slowLog = new IndexingSlowLog(indexSettings);

// Add our slowLog to the incoming IndexingOperationListeners:
this.listeners = new IndexingOperationListener[1+listenersIn.length];

@ -154,7 +153,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
this.fsyncTask = null;
}
this.refreshTask = new AsyncRefreshTask(this);
searchSlowLog = new SearchSlowLog(indexSettings.getSettings());
searchSlowLog = new SearchSlowLog(indexSettings);
}

public int numberOfShards() {

@ -567,7 +566,6 @@ public final class IndexService extends AbstractIndexComponent implements IndexC

public synchronized void updateMetaData(final IndexMetaData metadata) {
if (indexSettings.updateIndexMetaData(metadata)) {
final Settings settings = indexSettings.getSettings();
for (final IndexShard shard : this.shards.values()) {
try {
shard.onSettingsChanged();

@ -575,22 +573,6 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
logger.warn("[{}] failed to notify shard about setting change", e, shard.shardId().id());
}
}
try {
indexStore.onRefreshSettings(settings);
} catch (Exception e) {
logger.warn("failed to refresh index store settings", e);
}
try {
slowLog.onRefreshSettings(settings); // this will be refactored soon anyway so duplication is ok here
} catch (Exception e) {
logger.warn("failed to refresh slowlog settings", e);
}

try {
searchSlowLog.onRefreshSettings(settings); // this will be refactored soon anyway so duplication is ok here
} catch (Exception e) {
logger.warn("failed to refresh slowlog settings", e);
}
if (refreshTask.getInterval().equals(indexSettings.getRefreshInterval()) == false) {
rescheduleRefreshTasks();
}
@ -25,6 +25,8 @@ import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

@ -32,45 +34,64 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.translog.Translog;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;

/**
 * This class encapsulates all index level settings and handles settings updates.
 * It's created per index and available to all index level classes and allows them to retrieve
 * the latest updated settings instance. Classes that need to listen to settings updates can register
 * a settings consumer at index creation via {@link IndexModule#addIndexSettingsListener(Consumer)} that will
 * a settings consumer at index creation via {@link IndexModule#addSettingsUpdateConsumer(Setting, Consumer)} that will
 * be called for each settings update.
 */
public final class IndexSettings {

public static final String DEFAULT_FIELD = "index.query.default_field";
public static final String QUERY_STRING_LENIENT = "index.query_string.lenient";
public static final String QUERY_STRING_ANALYZE_WILDCARD = "indices.query.query_string.analyze_wildcard";
public static final String QUERY_STRING_ALLOW_LEADING_WILDCARD = "indices.query.query_string.allowLeadingWildcard";
public static final String ALLOW_UNMAPPED = "index.query.parse.allow_unmapped_fields";
public static final String INDEX_TRANSLOG_SYNC_INTERVAL = "index.translog.sync_interval";
public static final String INDEX_TRANSLOG_DURABILITY = "index.translog.durability";
public static final String INDEX_REFRESH_INTERVAL = "index.refresh_interval";
public static final Setting<String> DEFAULT_FIELD_SETTING = new Setting<>("index.query.default_field", AllFieldMapper.NAME, Function.identity(), false, Setting.Scope.INDEX);
public static final Setting<Boolean> QUERY_STRING_LENIENT_SETTING = Setting.boolSetting("index.query_string.lenient", false, false, Setting.Scope.INDEX);
public static final Setting<Boolean> QUERY_STRING_ANALYZE_WILDCARD = Setting.boolSetting("indices.query.query_string.analyze_wildcard", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> QUERY_STRING_ALLOW_LEADING_WILDCARD = Setting.boolSetting("indices.query.query_string.allowLeadingWildcard", true, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> ALLOW_UNMAPPED = Setting.boolSetting("index.query.parse.allow_unmapped_fields", true, false, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_TRANSLOG_SYNC_INTERVAL_SETTING = Setting.timeSetting("index.translog.sync_interval", TimeValue.timeValueSeconds(5), false, Setting.Scope.INDEX);
public static final Setting<Translog.Durability> INDEX_TRANSLOG_DURABILITY_SETTING = new Setting<>("index.translog.durability", Translog.Durability.REQUEST.name(), (value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), true, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_WARMER_ENABLED_SETTING = Setting.boolSetting("index.warmer.enabled", true, true, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_TTL_DISABLE_PURGE_SETTING = Setting.boolSetting("index.ttl.disable_purge", false, true, Setting.Scope.INDEX);
public static final Setting<String> INDEX_CHECK_ON_STARTUP = new Setting<>("index.shard.check_on_startup", "false", (s) -> {
switch(s) {
case "false":
case "true":
case "fix":
case "checksum":
return s;
default:
throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, fix, checksum] but was: " + s);
}
}, false, Setting.Scope.INDEX);

/**
 * Index setting describing the maximum value of from + size on a query.
 * The default maximum value of from + size on a query is 10,000. This was chosen as
 * a conservative default as it is sure not to cause trouble. Users can
 * certainly profile their cluster and decide to set it to 100,000
 * safely. 1,000,000 is probably way too high for any cluster to set
 * safely.
 */
public static final Setting<Integer> MAX_RESULT_WINDOW_SETTING = Setting.intSetting("index.max_result_window", 10000, 1, true, Setting.Scope.INDEX);
public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS);
public static final String INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE = "index.translog.flush_threshold_size";
public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
public static final Setting<TimeValue> INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS), true, Setting.Scope.INDEX);
public static final Setting<ByteSizeValue> INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTTING = Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), true, Setting.Scope.INDEX);

/**
 * Index setting to enable / disable deletes garbage collection.
 * This setting is updateable in real time.
 */
public static final String INDEX_GC_DELETES_SETTING = "index.gc_deletes";
public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
public static final Setting<TimeValue> INDEX_GC_DELETES_SETTING = Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), true, Setting.Scope.INDEX);

private final String uuid;
private final List<Consumer<Settings>> updateListeners;
private final Index index;
private final Version version;
private final ESLogger logger;
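Since index.max_result_window is registered as dynamic above, it can be changed on a live index. A hedged sketch using the era's Java admin client; the client call chain is an assumption of this example, while the setting constant and getKey() come from this diff.

// raise the from + size ceiling for one index to 50,000
client.admin().indices().prepareUpdateSettings("my_index")
    .setSettings(Settings.settingsBuilder()
        .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), 50_000))
    .get();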
@ -94,9 +115,11 @@ public final class IndexSettings {
private volatile ByteSizeValue flushThresholdSize;
private final MergeSchedulerConfig mergeSchedulerConfig;
private final MergePolicyConfig mergePolicyConfig;

private final IndexScopedSettings scopedSettings;
private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis();

private volatile boolean warmerEnabled;
private volatile int maxResultWindow;
private volatile boolean TTLPurgeDisabled;

/**
 * Returns the default search field for this index.

@ -139,10 +162,13 @@ public final class IndexSettings {
 *
 * @param indexMetaData the index metadata this settings object is associated with
 * @param nodeSettings the nodes settings this index is allocated on.
 * @param updateListeners a collection of listeners / consumers that should be notified if one or more settings are updated
 */
public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings, final Collection<Consumer<Settings>> updateListeners) {
this(indexMetaData, nodeSettings, updateListeners, (index) -> Regex.simpleMatch(index, indexMetaData.getIndex()));
public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings) {
this(indexMetaData, nodeSettings, (index) -> Regex.simpleMatch(index, indexMetaData.getIndex()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
}

IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings, IndexScopedSettings indexScopedSettings) {
this(indexMetaData, nodeSettings, (index) -> Regex.simpleMatch(index, indexMetaData.getIndex()), indexScopedSettings);
}

/**

@ -151,13 +177,12 @@ public final class IndexSettings {
 *
 * @param indexMetaData the index metadata this settings object is associated with
 * @param nodeSettings the nodes settings this index is allocated on.
 * @param updateListeners a collection of listeners / consumers that should be notified if one or more settings are updated
 * @param indexNameMatcher a matcher that can resolve an expression to the index name or index alias
 */
public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings, final Collection<Consumer<Settings>> updateListeners, final Predicate<String> indexNameMatcher) {
public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings, final Predicate<String> indexNameMatcher, IndexScopedSettings indexScopedSettings) {
scopedSettings = indexScopedSettings.copy(nodeSettings, indexMetaData);
this.nodeSettings = nodeSettings;
this.settings = Settings.builder().put(nodeSettings).put(indexMetaData.getSettings()).build();
this.updateListeners = Collections.unmodifiableList(new ArrayList<>(updateListeners));
this.index = new Index(indexMetaData.getIndex());
version = Version.indexCreated(settings);
uuid = settings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
@ -167,32 +192,44 @@ public final class IndexSettings {
numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
isShadowReplicaIndex = IndexMetaData.isIndexUsingShadowReplicas(settings);

this.defaultField = settings.get(DEFAULT_FIELD, AllFieldMapper.NAME);
this.queryStringLenient = settings.getAsBoolean(QUERY_STRING_LENIENT, false);
this.queryStringAnalyzeWildcard = settings.getAsBoolean(QUERY_STRING_ANALYZE_WILDCARD, false);
this.queryStringAllowLeadingWildcard = settings.getAsBoolean(QUERY_STRING_ALLOW_LEADING_WILDCARD, true);
this.defaultField = DEFAULT_FIELD_SETTING.get(settings);
this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings);
this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings);
this.queryStringAllowLeadingWildcard = QUERY_STRING_ALLOW_LEADING_WILDCARD.get(nodeSettings);
this.parseFieldMatcher = new ParseFieldMatcher(settings);
this.defaultAllowUnmappedFields = settings.getAsBoolean(ALLOW_UNMAPPED, true);
this.defaultAllowUnmappedFields = scopedSettings.get(ALLOW_UNMAPPED);
this.indexNameMatcher = indexNameMatcher;
final String value = settings.get(INDEX_TRANSLOG_DURABILITY, Translog.Durability.REQUEST.name());
this.durability = getFromSettings(settings, Translog.Durability.REQUEST);
syncInterval = settings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5));
refreshInterval = settings.getAsTime(INDEX_REFRESH_INTERVAL, DEFAULT_REFRESH_INTERVAL);
flushThresholdSize = settings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB));
mergeSchedulerConfig = new MergeSchedulerConfig(settings);
gcDeletesInMillis = settings.getAsTime(IndexSettings.INDEX_GC_DELETES_SETTING, DEFAULT_GC_DELETES).getMillis();
this.mergePolicyConfig = new MergePolicyConfig(logger, settings);
this.durability = scopedSettings.get(INDEX_TRANSLOG_DURABILITY_SETTING);
scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_DURABILITY_SETTING, this::setTranslogDurability);
syncInterval = INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.get(settings);
refreshInterval = scopedSettings.get(INDEX_REFRESH_INTERVAL_SETTING);
scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval);
flushThresholdSize = scopedSettings.get(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTTING);
scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTTING, this::setTranslogFlushThresholdSize);
mergeSchedulerConfig = new MergeSchedulerConfig(this);
scopedSettings.addSettingsUpdateConsumer(INDEX_GC_DELETES_SETTING, this::setGCDeletes);
gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis();
warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING);
scopedSettings.addSettingsUpdateConsumer(INDEX_WARMER_ENABLED_SETTING, this::setEnableWarmer);
maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING);
scopedSettings.addSettingsUpdateConsumer(MAX_RESULT_WINDOW_SETTING, this::setMaxResultWindow);
TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING);
scopedSettings.addSettingsUpdateConsumer(INDEX_TTL_DISABLE_PURGE_SETTING, this::setTTLPurgeDisabled);
this.mergePolicyConfig = new MergePolicyConfig(logger, this);
assert indexNameMatcher.test(indexMetaData.getIndex());

}

private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) {
this.flushThresholdSize = byteSizeValue;
}

/**
 * Creates a new {@link IndexSettings} instance adding the given listeners to the settings
 */
IndexSettings newWithListener(final Collection<Consumer<Settings>> updateListeners) {
ArrayList<Consumer<Settings>> newUpdateListeners = new ArrayList<>(updateListeners);
newUpdateListeners.addAll(this.updateListeners);
return new IndexSettings(indexMetaData, nodeSettings, newUpdateListeners, indexNameMatcher);
private void setGCDeletes(TimeValue timeValue) {
this.gcDeletesInMillis = timeValue.getMillis();
}

private void setRefreshInterval(TimeValue timeValue) {
this.refreshInterval = timeValue;
}

/**
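The constructor above repeats one idiom per setting: read the current value, then register a consumer that keeps a volatile field in sync on dynamic updates. Distilled below, with MY_SETTING standing in as a placeholder for any dynamic index-scoped Setting<Integer>; the IndexScopedSettings methods are the ones used throughout this diff.

private volatile int myValue;

private void wire(IndexScopedSettings scopedSettings) {
    // initial value; parsing and validation already happened inside the Setting
    myValue = scopedSettings.get(MY_SETTING);
    // invoked with the parsed value whenever the setting is dynamically updated
    scopedSettings.addSettingsUpdateConsumer(MY_SETTING, value -> this.myValue = value);
}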
@ -321,33 +358,15 @@ public final class IndexSettings {
}
this.indexMetaData = indexMetaData;
final Settings existingSettings = this.settings;
if (existingSettings.getByPrefix(IndexMetaData.INDEX_SETTING_PREFIX).getAsMap().equals(newSettings.getByPrefix(IndexMetaData.INDEX_SETTING_PREFIX).getAsMap())) {
if (existingSettings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).getAsMap().equals(newSettings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).getAsMap())) {
// nothing to update, same settings
return false;
}
final Settings mergedSettings = this.settings = Settings.builder().put(nodeSettings).put(newSettings).build();
for (final Consumer<Settings> consumer : updateListeners) {
try {
consumer.accept(mergedSettings);
} catch (Exception e) {
logger.warn("failed to refresh index settings for [{}]", e, mergedSettings);
}
}
try {
updateSettings(mergedSettings);
} catch (Exception e) {
logger.warn("failed to refresh index settings for [{}]", e, mergedSettings);
}
scopedSettings.applySettings(newSettings);
this.settings = Settings.builder().put(nodeSettings).put(newSettings).build();
return true;
}

/**
 * Returns all settings update consumers
 */
List<Consumer<Settings>> getUpdateListeners() { // for testing
return updateListeners;
}

/**
 * Returns the translog durability for this index.
 */
@ -355,60 +374,19 @@ public final class IndexSettings {
return durability;
}

private Translog.Durability getFromSettings(Settings settings, Translog.Durability defaultValue) {
final String value = settings.get(INDEX_TRANSLOG_DURABILITY, defaultValue.name());
try {
return Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException ex) {
logger.warn("Can't apply {} illegal value: {} using {} instead, use one of: {}", INDEX_TRANSLOG_DURABILITY, value, defaultValue, Arrays.toString(Translog.Durability.values()));
return defaultValue;
}
}

private void updateSettings(Settings settings) {
final Translog.Durability durability = getFromSettings(settings, this.durability);
if (durability != this.durability) {
logger.info("updating durability from [{}] to [{}]", this.durability, durability);
private void setTranslogDurability(Translog.Durability durability) {
this.durability = durability;
}

TimeValue refreshInterval = settings.getAsTime(IndexSettings.INDEX_REFRESH_INTERVAL, this.refreshInterval);
if (!refreshInterval.equals(this.refreshInterval)) {
logger.info("updating refresh_interval from [{}] to [{}]", this.refreshInterval, refreshInterval);
this.refreshInterval = refreshInterval;
/**
 * Returns <code>true</code> if index warmers are enabled, otherwise <code>false</code>.
 */
public boolean isWarmerEnabled() {
return warmerEnabled;
}

ByteSizeValue flushThresholdSize = settings.getAsBytesSize(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, this.flushThresholdSize);
if (!flushThresholdSize.equals(this.flushThresholdSize)) {
logger.info("updating flush_threshold_size from [{}] to [{}]", this.flushThresholdSize, flushThresholdSize);
this.flushThresholdSize = flushThresholdSize;
}

final int maxThreadCount = settings.getAsInt(MergeSchedulerConfig.MAX_THREAD_COUNT, mergeSchedulerConfig.getMaxThreadCount());
if (maxThreadCount != mergeSchedulerConfig.getMaxThreadCount()) {
logger.info("updating [{}] from [{}] to [{}]", MergeSchedulerConfig.MAX_THREAD_COUNT, mergeSchedulerConfig.getMaxMergeCount(), maxThreadCount);
mergeSchedulerConfig.setMaxThreadCount(maxThreadCount);
}

final int maxMergeCount = settings.getAsInt(MergeSchedulerConfig.MAX_MERGE_COUNT, mergeSchedulerConfig.getMaxMergeCount());
if (maxMergeCount != mergeSchedulerConfig.getMaxMergeCount()) {
logger.info("updating [{}] from [{}] to [{}]", MergeSchedulerConfig.MAX_MERGE_COUNT, mergeSchedulerConfig.getMaxMergeCount(), maxMergeCount);
mergeSchedulerConfig.setMaxMergeCount(maxMergeCount);
}

final boolean autoThrottle = settings.getAsBoolean(MergeSchedulerConfig.AUTO_THROTTLE, mergeSchedulerConfig.isAutoThrottle());
if (autoThrottle != mergeSchedulerConfig.isAutoThrottle()) {
logger.info("updating [{}] from [{}] to [{}]", MergeSchedulerConfig.AUTO_THROTTLE, mergeSchedulerConfig.isAutoThrottle(), autoThrottle);
mergeSchedulerConfig.setAutoThrottle(autoThrottle);
}

long gcDeletesInMillis = settings.getAsTime(IndexSettings.INDEX_GC_DELETES_SETTING, TimeValue.timeValueMillis(this.gcDeletesInMillis)).getMillis();
if (gcDeletesInMillis != this.gcDeletesInMillis) {
logger.info("updating {} from [{}] to [{}]", IndexSettings.INDEX_GC_DELETES_SETTING, TimeValue.timeValueMillis(this.gcDeletesInMillis), TimeValue.timeValueMillis(gcDeletesInMillis));
this.gcDeletesInMillis = gcDeletesInMillis;
}

mergePolicyConfig.onRefreshSettings(settings);
private void setEnableWarmer(boolean enableWarmer) {
this.warmerEnabled = enableWarmer;
}

/**

@ -436,6 +414,18 @@ public final class IndexSettings {
 */
public MergeSchedulerConfig getMergeSchedulerConfig() { return mergeSchedulerConfig; }

/**
 * Returns the max result window for search requests, describing the maximum value of from + size on a query.
 */
public int getMaxResultWindow() {
return this.maxResultWindow;
}

private void setMaxResultWindow(int maxResultWindow) {
this.maxResultWindow = maxResultWindow;
}

/**
 * Returns the GC deletes cycle in milliseconds.
 */

@ -450,4 +440,22 @@ public final class IndexSettings {
return mergePolicyConfig.getMergePolicy();
}

/**
 * Returns <code>true</code> if the TTL purge is disabled for this index. Default is <code>false</code>.
 */
public boolean isTTLPurgeDisabled() {
return TTLPurgeDisabled;
}

private void setTTLPurgeDisabled(boolean ttlPurgeDisabled) {
this.TTLPurgeDisabled = ttlPurgeDisabled;
}

public <T> T getValue(Setting<T> setting) {
return scopedSettings.get(setting);
}

public IndexScopedSettings getScopedSettings() { return scopedSettings; }
}
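For a feel of the simplified construction path, a test-style sketch: IndexMetaData.builder, SETTING_VERSION_CREATED and the concrete values follow the era's test conventions and are assumptions of this example, while the two-argument constructor, getValue and the refresh-interval setting come from the diff above.

IndexMetaData metaData = IndexMetaData.builder("test")
    .settings(Settings.settingsBuilder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put("index.refresh_interval", "5s"))
    .numberOfShards(1)
    .numberOfReplicas(0)
    .build();

// no listener collection anymore: consumers register against the scoped settings
IndexSettings indexSettings = new IndexSettings(metaData, Settings.EMPTY);
TimeValue refresh = indexSettings.getValue(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING); // 5s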
@ -23,7 +23,7 @@ import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.engine.Engine;

@ -31,15 +31,12 @@ import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.shard.IndexingOperationListener;

import java.io.IOException;
import java.util.Locale;
import java.util.concurrent.TimeUnit;

/**
 */
public final class IndexingSlowLog implements IndexingOperationListener {

private boolean reformat;

private long indexWarnThreshold;
private long indexInfoThreshold;
private long indexDebugThreshold;

@ -51,21 +48,33 @@ public final class IndexingSlowLog implements IndexingOperationListener {
 */
private int maxSourceCharsToLog;

private String level;
private SlowLogLevel level;

private final ESLogger indexLogger;
private final ESLogger deleteLogger;

private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog";
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN = INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn";
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO = INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.info";
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG = INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.debug";
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE = INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.trace";
public static final String INDEX_INDEXING_SLOWLOG_REFORMAT = INDEX_INDEXING_SLOWLOG_PREFIX +".reformat";
public static final String INDEX_INDEXING_SLOWLOG_LEVEL = INDEX_INDEXING_SLOWLOG_PREFIX +".level";
public static final String INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG = INDEX_INDEXING_SLOWLOG_PREFIX + ".source";
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING = Setting.boolSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".reformat", true, true, Setting.Scope.INDEX);
public static final Setting<SlowLogLevel> INDEX_INDEXING_SLOWLOG_LEVEL_SETTING = new Setting<>(INDEX_INDEXING_SLOWLOG_PREFIX +".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, true, Setting.Scope.INDEX);
/**
 * Reads how much of the source to log. The user can specify any value they
 * like: numbers are interpreted as the maximum number of characters to log,
 * and everything else is interpreted the way Elasticsearch interprets booleans,
 * which is then converted to 0 for false and Integer.MAX_VALUE for true.
 */
public static final Setting<Integer> INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING = new Setting<>(INDEX_INDEXING_SLOWLOG_PREFIX + ".source", "1000", (value) -> {
try {
return Integer.parseInt(value, 10);
} catch (NumberFormatException e) {
return Booleans.parseBoolean(value, true) ? Integer.MAX_VALUE : 0;
}
}, true, Setting.Scope.INDEX);

IndexingSlowLog(Settings indexSettings) {
IndexingSlowLog(IndexSettings indexSettings) {
this(indexSettings, Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"),
Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".delete"));
}
@ -73,77 +82,62 @@ public final class IndexingSlowLog implements IndexingOperationListener {
/**
 * Build with the specified loggers. Only used for testing.
 */
IndexingSlowLog(Settings indexSettings, ESLogger indexLogger, ESLogger deleteLogger) {
IndexingSlowLog(IndexSettings indexSettings, ESLogger indexLogger, ESLogger deleteLogger) {
this.indexLogger = indexLogger;
this.deleteLogger = deleteLogger;

this.reformat = indexSettings.getAsBoolean(INDEX_INDEXING_SLOWLOG_REFORMAT, true);
this.indexWarnThreshold = indexSettings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, TimeValue.timeValueNanos(-1)).nanos();
this.indexInfoThreshold = indexSettings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, TimeValue.timeValueNanos(-1)).nanos();
this.indexDebugThreshold = indexSettings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, TimeValue.timeValueNanos(-1)).nanos();
this.indexTraceThreshold = indexSettings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, TimeValue.timeValueNanos(-1)).nanos();
this.level = indexSettings.get(INDEX_INDEXING_SLOWLOG_LEVEL, "TRACE").toUpperCase(Locale.ROOT);
this.maxSourceCharsToLog = readSourceToLog(indexSettings);

indexLogger.setLevel(level);
deleteLogger.setLevel(level);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat);
this.reformat = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING, this::setWarnThreshold);
this.indexWarnThreshold = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING, this::setInfoThreshold);
this.indexInfoThreshold = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING, this::setDebugThreshold);
this.indexDebugThreshold = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING, this::setTraceThreshold);
this.indexTraceThreshold = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_LEVEL_SETTING, this::setLevel);
setLevel(indexSettings.getValue(INDEX_INDEXING_SLOWLOG_LEVEL_SETTING));
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, this::setMaxSourceCharsToLog);
this.maxSourceCharsToLog = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING);
}

synchronized void onRefreshSettings(Settings settings) {
long indexWarnThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, TimeValue.timeValueNanos(this.indexWarnThreshold)).nanos();
if (indexWarnThreshold != this.indexWarnThreshold) {
this.indexWarnThreshold = indexWarnThreshold;
}
long indexInfoThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, TimeValue.timeValueNanos(this.indexInfoThreshold)).nanos();
if (indexInfoThreshold != this.indexInfoThreshold) {
this.indexInfoThreshold = indexInfoThreshold;
}
long indexDebugThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, TimeValue.timeValueNanos(this.indexDebugThreshold)).nanos();
if (indexDebugThreshold != this.indexDebugThreshold) {
this.indexDebugThreshold = indexDebugThreshold;
}
long indexTraceThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, TimeValue.timeValueNanos(this.indexTraceThreshold)).nanos();
if (indexTraceThreshold != this.indexTraceThreshold) {
this.indexTraceThreshold = indexTraceThreshold;
private void setMaxSourceCharsToLog(int maxSourceCharsToLog) {
this.maxSourceCharsToLog = maxSourceCharsToLog;
}

String level = settings.get(INDEX_INDEXING_SLOWLOG_LEVEL, this.level);
if (!level.equals(this.level)) {
this.indexLogger.setLevel(level.toUpperCase(Locale.ROOT));
this.deleteLogger.setLevel(level.toUpperCase(Locale.ROOT));
private void setLevel(SlowLogLevel level) {
this.level = level;
this.indexLogger.setLevel(level.name());
this.deleteLogger.setLevel(level.name());
}

boolean reformat = settings.getAsBoolean(INDEX_INDEXING_SLOWLOG_REFORMAT, this.reformat);
if (reformat != this.reformat) {
private void setWarnThreshold(TimeValue warnThreshold) {
this.indexWarnThreshold = warnThreshold.nanos();
}

private void setInfoThreshold(TimeValue infoThreshold) {
this.indexInfoThreshold = infoThreshold.nanos();
}

private void setDebugThreshold(TimeValue debugThreshold) {
this.indexDebugThreshold = debugThreshold.nanos();
}

private void setTraceThreshold(TimeValue traceThreshold) {
this.indexTraceThreshold = traceThreshold.nanos();
}

private void setReformat(boolean reformat) {
this.reformat = reformat;
}

int maxSourceCharsToLog = readSourceToLog(settings);
if (maxSourceCharsToLog != this.maxSourceCharsToLog) {
this.maxSourceCharsToLog = maxSourceCharsToLog;
}
}

public void postIndex(Engine.Index index) {
final long took = index.endTime() - index.startTime();
postIndexing(index.parsedDoc(), took);
}

/**
 * Reads how much of the source to log. The user can specify any value they
 * like: numbers are interpreted as the maximum number of characters to log,
 * and everything else is interpreted the way Elasticsearch interprets booleans,
 * which is then converted to 0 for false and Integer.MAX_VALUE for true.
 */
private int readSourceToLog(Settings settings) {
String sourceToLog = settings.get(INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG, "1000");
try {
return Integer.parseInt(sourceToLog, 10);
} catch (NumberFormatException e) {
return Booleans.parseBoolean(sourceToLog, true) ? Integer.MAX_VALUE : 0;
}
}

private void postIndexing(ParsedDocument doc, long tookInNanos) {
if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) {
@ -194,4 +188,33 @@ public final class IndexingSlowLog implements IndexingOperationListener {
return sb.toString();
}
}

boolean isReformat() {
return reformat;
}

long getIndexWarnThreshold() {
return indexWarnThreshold;
}

long getIndexInfoThreshold() {
return indexInfoThreshold;
}

long getIndexTraceThreshold() {
return indexTraceThreshold;
}

long getIndexDebugThreshold() {
return indexDebugThreshold;
}

int getMaxSourceCharsToLog() {
return maxSourceCharsToLog;
}

SlowLogLevel getLevel() {
return level;
}

}
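The ".source" setting above is the one odd parser in this file: integers are taken literally while anything else is treated as a boolean. A quick illustration of the three shapes it accepts, using only the Setting#get(Settings) accessor and the settingsBuilder seen throughout this diff; the values are illustrative.

Setting<Integer> source = IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING;

// a number is the maximum number of source characters to log
int a = source.get(Settings.settingsBuilder().put("index.indexing.slowlog.source", "500").build());   // 500
// booleans mean "log everything" or "log nothing"
int b = source.get(Settings.settingsBuilder().put("index.indexing.slowlog.source", "true").build());  // Integer.MAX_VALUE
int c = source.get(Settings.settingsBuilder().put("index.indexing.slowlog.source", "false").build()); // 0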
@ -23,7 +23,7 @@ import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.TieredMergePolicy;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

@ -117,7 +117,6 @@ public final class MergePolicyConfig {
private final TieredMergePolicy mergePolicy = new TieredMergePolicy();
private final ESLogger logger;
private final boolean mergesEnabled;
private volatile double noCFSRatio;

public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d;
public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(2, ByteSizeUnit.MB);

@ -126,35 +125,42 @@ public final class MergePolicyConfig {
public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB);
public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d;
public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d;
public static final String INDEX_COMPOUND_FORMAT = "index.compound_format";
public static final Setting<Double> INDEX_COMPOUND_FORMAT_SETTING = new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), MergePolicyConfig::parseNoCFSRatio, true, Setting.Scope.INDEX);

public static final String INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED = "index.merge.policy.expunge_deletes_allowed";
public static final String INDEX_MERGE_POLICY_FLOOR_SEGMENT = "index.merge.policy.floor_segment";
public static final String INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE = "index.merge.policy.max_merge_at_once";
public static final String INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT = "index.merge.policy.max_merge_at_once_explicit";
public static final String INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT = "index.merge.policy.max_merged_segment";
public static final String INDEX_MERGE_POLICY_SEGMENTS_PER_TIER = "index.merge.policy.segments_per_tier";
public static final String INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT = "index.merge.policy.reclaim_deletes_weight";
public static final String INDEX_MERGE_ENABLED = "index.merge.enabled";
public static final Setting<Double> INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING = Setting.doubleSetting("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d, true, Setting.Scope.INDEX);
public static final Setting<ByteSizeValue> INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING = Setting.byteSizeSetting("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT, true, Setting.Scope.INDEX);
public static final Setting<Integer> INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING = Setting.intSetting("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE, 2, true, Setting.Scope.INDEX);
public static final Setting<Integer> INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING = Setting.intSetting("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT, 2, true, Setting.Scope.INDEX);
public static final Setting<ByteSizeValue> INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT, true, Setting.Scope.INDEX);
public static final Setting<Double> INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING = Setting.doubleSetting("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER, 2.0d, true, Setting.Scope.INDEX);
public static final Setting<Double> INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING = Setting.doubleSetting("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT, 0.0d, true, Setting.Scope.INDEX);
public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; // don't convert to Setting<> and register... we only set this in tests and register via a plugin

MergePolicyConfig(ESLogger logger, Settings indexSettings) {
MergePolicyConfig(ESLogger logger, IndexSettings indexSettings) {
this.logger = logger;
this.noCFSRatio = parseNoCFSRatio(indexSettings.get(INDEX_COMPOUND_FORMAT, Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO)));
double forceMergeDeletesPctAllowed = indexSettings.getAsDouble("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED); // percentage
ByteSizeValue floorSegment = indexSettings.getAsBytesSize("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT);
int maxMergeAtOnce = indexSettings.getAsInt("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE);
int maxMergeAtOnceExplicit = indexSettings.getAsInt("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_COMPOUND_FORMAT_SETTING, this::setNoCFSRatio);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, this::expungeDeletesAllowed);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, this::floorSegmentSetting);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, this::maxMergesAtOnce);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING, this::maxMergesAtOnceExplicit);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, this::maxMergedSegment);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, this::segmentsPerTier);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, this::reclaimDeletesWeight);
double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage
ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING);
int maxMergeAtOnce = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING);
int maxMergeAtOnceExplicit = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING);
// TODO is this really a good default number for max_merge_segment, what happens for large indices, won't they end up with many segments?
ByteSizeValue maxMergedSegment = indexSettings.getAsBytesSize("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT);
double segmentsPerTier = indexSettings.getAsDouble("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER);
double reclaimDeletesWeight = indexSettings.getAsDouble("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT);
this.mergesEnabled = indexSettings.getAsBoolean(INDEX_MERGE_ENABLED, true);
ByteSizeValue maxMergedSegment = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING);
double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING);
double reclaimDeletesWeight = indexSettings.getValue(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING);
this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true);
if (mergesEnabled == false) {
logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production environments", INDEX_MERGE_ENABLED);
}
maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier);
mergePolicy.setNoCFSRatio(noCFSRatio);
mergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING));
mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed);
mergePolicy.setFloorSegmentMB(floorSegment.mbFrac());
mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);

@ -166,6 +172,38 @@ public final class MergePolicyConfig {
forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, reclaimDeletesWeight);
}

private void reclaimDeletesWeight(Double reclaimDeletesWeight) {
mergePolicy.setReclaimDeletesWeight(reclaimDeletesWeight);
}

private void segmentsPerTier(Double segmentsPerTier) {
mergePolicy.setSegmentsPerTier(segmentsPerTier);
}

private void maxMergedSegment(ByteSizeValue maxMergedSegment) {
mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
}

private void maxMergesAtOnceExplicit(Integer maxMergeAtOnceExplicit) {
mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
}

private void maxMergesAtOnce(Integer maxMergeAtOnce) {
mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);
}

private void floorSegmentSetting(ByteSizeValue floorSegmentSetting) {
mergePolicy.setFloorSegmentMB(floorSegmentSetting.mbFrac());
}

private void expungeDeletesAllowed(Double value) {
mergePolicy.setForceMergeDeletesPctAllowed(value);
}

private void setNoCFSRatio(Double noCFSRatio) {
mergePolicy.setNoCFSRatio(noCFSRatio);
}

private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerTier) {
|
||||
// fixing maxMergeAtOnce, see TieredMergePolicy#setMaxMergeAtOnce
|
||||
if (!(segmentsPerTier >= maxMergeAtOnce)) {
|
||||
|
@ -184,65 +222,6 @@ public final class MergePolicyConfig {
|
|||
return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE;
|
||||
}
|
||||
|
||||
void onRefreshSettings(Settings settings) {
|
||||
final double oldExpungeDeletesPctAllowed = mergePolicy.getForceMergeDeletesPctAllowed();
|
||||
final double expungeDeletesPctAllowed = settings.getAsDouble(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, oldExpungeDeletesPctAllowed);
|
||||
if (expungeDeletesPctAllowed != oldExpungeDeletesPctAllowed) {
|
||||
logger.info("updating [expunge_deletes_allowed] from [{}] to [{}]", oldExpungeDeletesPctAllowed, expungeDeletesPctAllowed);
|
||||
mergePolicy.setForceMergeDeletesPctAllowed(expungeDeletesPctAllowed);
|
||||
}
|
||||
|
||||
final double oldFloorSegmentMB = mergePolicy.getFloorSegmentMB();
|
||||
final ByteSizeValue floorSegment = settings.getAsBytesSize(INDEX_MERGE_POLICY_FLOOR_SEGMENT, null);
|
||||
if (floorSegment != null && floorSegment.mbFrac() != oldFloorSegmentMB) {
|
||||
logger.info("updating [floor_segment] from [{}mb] to [{}]", oldFloorSegmentMB, floorSegment);
|
||||
mergePolicy.setFloorSegmentMB(floorSegment.mbFrac());
|
||||
}
|
||||
|
||||
final double oldSegmentsPerTier = mergePolicy.getSegmentsPerTier();
|
||||
final double segmentsPerTier = settings.getAsDouble(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, oldSegmentsPerTier);
|
||||
if (segmentsPerTier != oldSegmentsPerTier) {
|
||||
logger.info("updating [segments_per_tier] from [{}] to [{}]", oldSegmentsPerTier, segmentsPerTier);
|
||||
mergePolicy.setSegmentsPerTier(segmentsPerTier);
|
||||
}
|
||||
|
||||
final int oldMaxMergeAtOnce = mergePolicy.getMaxMergeAtOnce();
|
||||
int maxMergeAtOnce = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, oldMaxMergeAtOnce);
|
||||
if (maxMergeAtOnce != oldMaxMergeAtOnce) {
|
||||
logger.info("updating [max_merge_at_once] from [{}] to [{}]", oldMaxMergeAtOnce, maxMergeAtOnce);
|
||||
maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier);
|
||||
mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);
|
||||
}
|
||||
|
||||
final int oldMaxMergeAtOnceExplicit = mergePolicy.getMaxMergeAtOnceExplicit();
|
||||
final int maxMergeAtOnceExplicit = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT, oldMaxMergeAtOnceExplicit);
|
||||
if (maxMergeAtOnceExplicit != oldMaxMergeAtOnceExplicit) {
|
||||
logger.info("updating [max_merge_at_once_explicit] from [{}] to [{}]", oldMaxMergeAtOnceExplicit, maxMergeAtOnceExplicit);
|
||||
mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
|
||||
}
|
||||
|
||||
final double oldMaxMergedSegmentMB = mergePolicy.getMaxMergedSegmentMB();
|
||||
final ByteSizeValue maxMergedSegment = settings.getAsBytesSize(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, null);
|
||||
if (maxMergedSegment != null && maxMergedSegment.mbFrac() != oldMaxMergedSegmentMB) {
|
||||
logger.info("updating [max_merged_segment] from [{}mb] to [{}]", oldMaxMergedSegmentMB, maxMergedSegment);
|
||||
mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
|
||||
}
|
||||
|
||||
final double oldReclaimDeletesWeight = mergePolicy.getReclaimDeletesWeight();
|
||||
final double reclaimDeletesWeight = settings.getAsDouble(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, oldReclaimDeletesWeight);
|
||||
if (reclaimDeletesWeight != oldReclaimDeletesWeight) {
|
||||
logger.info("updating [reclaim_deletes_weight] from [{}] to [{}]", oldReclaimDeletesWeight, reclaimDeletesWeight);
|
||||
mergePolicy.setReclaimDeletesWeight(reclaimDeletesWeight);
|
||||
}
|
||||
|
||||
double noCFSRatio = parseNoCFSRatio(settings.get(INDEX_COMPOUND_FORMAT, Double.toString(MergePolicyConfig.this.noCFSRatio)));
|
||||
if (noCFSRatio != MergePolicyConfig.this.noCFSRatio) {
|
||||
logger.info("updating index.compound_format from [{}] to [{}]", formatNoCFSRatio(MergePolicyConfig.this.noCFSRatio), formatNoCFSRatio(noCFSRatio));
|
||||
mergePolicy.setNoCFSRatio(noCFSRatio);
|
||||
MergePolicyConfig.this.noCFSRatio = noCFSRatio;
|
||||
}
|
||||
}
|
||||
|
||||
private static double parseNoCFSRatio(String noCFSRatio) {
|
||||
noCFSRatio = noCFSRatio.trim();
|
||||
if (noCFSRatio.equalsIgnoreCase("true")) {
|
||||
|
@ -261,14 +240,4 @@ public final class MergePolicyConfig {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static String formatNoCFSRatio(double ratio) {
|
||||
if (ratio == 1.0) {
|
||||
return Boolean.TRUE.toString();
|
||||
} else if (ratio == 0.0) {
|
||||
return Boolean.FALSE.toString();
|
||||
} else {
|
||||
return Double.toString(ratio);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
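The pattern above — declare a registered setting, add an update consumer, then read the initial value — replaces the old diff-and-log `onRefreshSettings` polling and recurs throughout this change. A minimal sketch of the idiom, assuming the Setting and IndexSettings APIs exactly as they appear in this diff; the component and setting key below are hypothetical:

    // Sketch only: MyMergeTuning and "index.my_plugin.weight" are hypothetical.
    public final class MyMergeTuning {

        // doubleSetting(key, default, minimum, dynamic, scope)
        public static final Setting<Double> MY_WEIGHT_SETTING =
            Setting.doubleSetting("index.my_plugin.weight", 1.0d, 0.0d, true, Setting.Scope.INDEX);

        private volatile double weight;

        MyMergeTuning(IndexSettings indexSettings) {
            // the consumer runs on every validated settings update for this index
            indexSettings.getScopedSettings().addSettingsUpdateConsumer(MY_WEIGHT_SETTING, this::setWeight);
            this.weight = indexSettings.getValue(MY_WEIGHT_SETTING);
        }

        private void setWeight(Double weight) {
            this.weight = weight;
        }
    }

Because each consumer only fires for its own key, the per-key "did it change, log it, apply it" boilerplate of the removed onRefreshSettings method disappears entirely.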
@@ -20,6 +20,7 @@
package org.elasticsearch.index;

import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.index.IndexSettings;

@@ -52,18 +53,21 @@ import org.elasticsearch.index.IndexSettings;
 */
public final class MergeSchedulerConfig {

    public static final String MAX_THREAD_COUNT = "index.merge.scheduler.max_thread_count";
    public static final String MAX_MERGE_COUNT = "index.merge.scheduler.max_merge_count";
    public static final String AUTO_THROTTLE = "index.merge.scheduler.auto_throttle";
    public static final Setting<Integer> MAX_THREAD_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_thread_count", (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), true, Setting.Scope.INDEX);
    public static final Setting<Integer> MAX_MERGE_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_merge_count", (s) -> Integer.toString(MAX_THREAD_COUNT_SETTING.get(s) + 5), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_merge_count"), true, Setting.Scope.INDEX);
    public static final Setting<Boolean> AUTO_THROTTLE_SETTING = Setting.boolSetting("index.merge.scheduler.auto_throttle", true, true, Setting.Scope.INDEX);

    private volatile boolean autoThrottle;
    private volatile int maxThreadCount;
    private volatile int maxMergeCount;

    MergeSchedulerConfig(Settings settings) {
        maxThreadCount = settings.getAsInt(MAX_THREAD_COUNT, Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(settings) / 2)));
        maxMergeCount = settings.getAsInt(MAX_MERGE_COUNT, maxThreadCount + 5);
        this.autoThrottle = settings.getAsBoolean(AUTO_THROTTLE, true);
    MergeSchedulerConfig(IndexSettings indexSettings) {
        indexSettings.getScopedSettings().addSettingsUpdateConsumer(MAX_THREAD_COUNT_SETTING, this::setMaxThreadCount);
        indexSettings.getScopedSettings().addSettingsUpdateConsumer(MAX_MERGE_COUNT_SETTING, this::setMaxMergeCount);
        indexSettings.getScopedSettings().addSettingsUpdateConsumer(AUTO_THROTTLE_SETTING, this::setAutoThrottle);
        maxThreadCount = indexSettings.getValue(MAX_THREAD_COUNT_SETTING);
        maxMergeCount = indexSettings.getValue(MAX_MERGE_COUNT_SETTING);
        this.autoThrottle = indexSettings.getValue(AUTO_THROTTLE_SETTING);
    }

    /**
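Note how MAX_MERGE_COUNT_SETTING derives its default from MAX_THREAD_COUNT_SETTING at evaluation time, so the default tracks a user override of the other setting. A hedged sketch of the same trick, assuming the `new Setting<>(key, defaultFunction, parser, dynamic, scope)` constructor shown above; both keys are hypothetical:

    // Hypothetical pair: the second setting's default is computed from the
    // first setting's (possibly user-overridden) value.
    public static final Setting<Integer> BASE_COUNT_SETTING =
        new Setting<>("index.my_plugin.base_count",
            (s) -> "4",                                              // static default
            (s) -> Setting.parseInt(s, 1, "index.my_plugin.base_count"),
            true, Setting.Scope.INDEX);

    public static final Setting<Integer> DERIVED_COUNT_SETTING =
        new Setting<>("index.my_plugin.derived_count",
            (s) -> Integer.toString(BASE_COUNT_SETTING.get(s) + 5), // default tracks the base setting
            (s) -> Setting.parseInt(s, 1, "index.my_plugin.derived_count"),
            true, Setting.Scope.INDEX);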
@@ -0,0 +1,238 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.internal.SearchContext;

import java.util.concurrent.TimeUnit;

/**
 */
public final class SearchSlowLog {

    private boolean reformat;

    private long queryWarnThreshold;
    private long queryInfoThreshold;
    private long queryDebugThreshold;
    private long queryTraceThreshold;

    private long fetchWarnThreshold;
    private long fetchInfoThreshold;
    private long fetchDebugThreshold;
    private long fetchTraceThreshold;

    private SlowLogLevel level;

    private final ESLogger queryLogger;
    private final ESLogger fetchLogger;

    private static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog";
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<Boolean> INDEX_SEARCH_SLOWLOG_REFORMAT = Setting.boolSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat", true, true, Setting.Scope.INDEX);
    public static final Setting<SlowLogLevel> INDEX_SEARCH_SLOWLOG_LEVEL = new Setting<>(INDEX_SEARCH_SLOWLOG_PREFIX + ".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, true, Setting.Scope.INDEX);

    public SearchSlowLog(IndexSettings indexSettings) {

        this.queryLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query");
        this.fetchLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch");

        indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_REFORMAT, this::setReformat);
        this.reformat = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_REFORMAT);

        indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold);
        this.queryWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos();
        indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING, this::setQueryInfoThreshold);
        this.queryInfoThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING).nanos();
        indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING, this::setQueryDebugThreshold);
        this.queryDebugThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING).nanos();
        indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING, this::setQueryTraceThreshold);
        this.queryTraceThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING).nanos();

        indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING, this::setFetchWarnThreshold);
        this.fetchWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING).nanos();
        indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING, this::setFetchInfoThreshold);
        this.fetchInfoThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING).nanos();
        indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING, this::setFetchDebugThreshold);
        this.fetchDebugThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING).nanos();
        indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING, this::setFetchTraceThreshold);
        this.fetchTraceThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING).nanos();

        indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_LEVEL, this::setLevel);
        setLevel(indexSettings.getValue(INDEX_SEARCH_SLOWLOG_LEVEL));
    }

    private void setLevel(SlowLogLevel level) {
        this.level = level;
        this.queryLogger.setLevel(level.name());
        this.fetchLogger.setLevel(level.name());
    }

    public void onQueryPhase(SearchContext context, long tookInNanos) {
        if (queryWarnThreshold >= 0 && tookInNanos > queryWarnThreshold) {
            queryLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (queryInfoThreshold >= 0 && tookInNanos > queryInfoThreshold) {
            queryLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (queryDebugThreshold >= 0 && tookInNanos > queryDebugThreshold) {
            queryLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (queryTraceThreshold >= 0 && tookInNanos > queryTraceThreshold) {
            queryLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        }
    }

    public void onFetchPhase(SearchContext context, long tookInNanos) {
        if (fetchWarnThreshold >= 0 && tookInNanos > fetchWarnThreshold) {
            fetchLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (fetchInfoThreshold >= 0 && tookInNanos > fetchInfoThreshold) {
            fetchLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (fetchDebugThreshold >= 0 && tookInNanos > fetchDebugThreshold) {
            fetchLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (fetchTraceThreshold >= 0 && tookInNanos > fetchTraceThreshold) {
            fetchLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        }
    }

    private static class SlowLogSearchContextPrinter {
        private final SearchContext context;
        private final long tookInNanos;
        private final boolean reformat;

        public SlowLogSearchContextPrinter(SearchContext context, long tookInNanos, boolean reformat) {
            this.context = context;
            this.tookInNanos = tookInNanos;
            this.reformat = reformat;
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], ");
            if (context.types() == null) {
                sb.append("types[], ");
            } else {
                sb.append("types[");
                Strings.arrayToDelimitedString(context.types(), ",", sb);
                sb.append("], ");
            }
            if (context.groupStats() == null) {
                sb.append("stats[], ");
            } else {
                sb.append("stats[");
                Strings.collectionToDelimitedString(context.groupStats(), ",", "", "", sb);
                sb.append("], ");
            }
            sb.append("search_type[").append(context.searchType()).append("], total_shards[").append(context.numberOfShards()).append("], ");
            if (context.request().source() != null) {
                sb.append("source[").append(context.request().source()).append("], ");
            } else {
                sb.append("source[], ");
            }
            return sb.toString();
        }
    }

    private void setReformat(boolean reformat) {
        this.reformat = reformat;
    }

    private void setQueryWarnThreshold(TimeValue warnThreshold) {
        this.queryWarnThreshold = warnThreshold.nanos();
    }

    private void setQueryInfoThreshold(TimeValue infoThreshold) {
        this.queryInfoThreshold = infoThreshold.nanos();
    }

    private void setQueryDebugThreshold(TimeValue debugThreshold) {
        this.queryDebugThreshold = debugThreshold.nanos();
    }

    private void setQueryTraceThreshold(TimeValue traceThreshold) {
        this.queryTraceThreshold = traceThreshold.nanos();
    }

    private void setFetchWarnThreshold(TimeValue warnThreshold) {
        this.fetchWarnThreshold = warnThreshold.nanos();
    }

    private void setFetchInfoThreshold(TimeValue infoThreshold) {
        this.fetchInfoThreshold = infoThreshold.nanos();
    }

    private void setFetchDebugThreshold(TimeValue debugThreshold) {
        this.fetchDebugThreshold = debugThreshold.nanos();
    }

    private void setFetchTraceThreshold(TimeValue traceThreshold) {
        this.fetchTraceThreshold = traceThreshold.nanos();
    }

    boolean isReformat() {
        return reformat;
    }

    long getQueryWarnThreshold() {
        return queryWarnThreshold;
    }

    long getQueryInfoThreshold() {
        return queryInfoThreshold;
    }

    long getQueryDebugThreshold() {
        return queryDebugThreshold;
    }

    long getQueryTraceThreshold() {
        return queryTraceThreshold;
    }

    long getFetchWarnThreshold() {
        return fetchWarnThreshold;
    }

    long getFetchInfoThreshold() {
        return fetchInfoThreshold;
    }

    long getFetchDebugThreshold() {
        return fetchDebugThreshold;
    }

    long getFetchTraceThreshold() {
        return fetchTraceThreshold;
    }

    SlowLogLevel getLevel() {
        return level;
    }
}
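One convention in this file is worth calling out: `timeSetting` with a default of `-1` nanos means "threshold disabled", and each update consumer writes through to a plain long field that is read on the hot path. A minimal sketch of the same pattern, assuming the Setting and IndexSettings APIs exactly as shown in this file; the component and key below are hypothetical:

    // Sketch only: MySlowOpLog and "index.my_plugin.slowop.warn" are hypothetical.
    public final class MySlowOpLog {

        // timeSetting(key, default, minimum, dynamic, scope); -1 disables the threshold
        public static final Setting<TimeValue> WARN_THRESHOLD_SETTING =
            Setting.timeSetting("index.my_plugin.slowop.warn",
                TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);

        private volatile long warnThresholdNanos;

        MySlowOpLog(IndexSettings indexSettings) {
            indexSettings.getScopedSettings().addSettingsUpdateConsumer(WARN_THRESHOLD_SETTING, v -> this.warnThresholdNanos = v.nanos());
            this.warnThresholdNanos = indexSettings.getValue(WARN_THRESHOLD_SETTING).nanos();
        }

        void onOp(long tookInNanos) {
            if (warnThresholdNanos >= 0 && tookInNanos > warnThresholdNanos) {
                // log the slow operation here, as onQueryPhase/onFetchPhase do above
            }
        }
    }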
@@ -16,18 +16,13 @@
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index;

package org.elasticsearch.recovery;
import java.util.Locale;

import org.elasticsearch.common.settings.Settings;

/**
 *
 */
public class SmallTranslogOpsRecoveryIT extends SimpleRecoveryIT {

    @Override
    protected Settings recoverySettings() {
        return Settings.settingsBuilder().put("index.shard.recovery.translog_ops", 1).build();
public enum SlowLogLevel {
    WARN, TRACE, INFO, DEBUG;
    public static SlowLogLevel parse(String level) {
        return valueOf(level.toUpperCase(Locale.ROOT));
    }
}
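SlowLogLevel doubles as a setting value type: INDEX_SEARCH_SLOWLOG_LEVEL above wires `SlowLogLevel::parse` in as the parser, so an invalid level is rejected during validation instead of at use time. A hedged sketch of the same idea with a hypothetical enum and key, assuming the `new Setting<>(key, default, parser, dynamic, scope)` constructor from this diff:

    // Hypothetical enum-valued setting following the SlowLogLevel pattern.
    enum Mode {
        FAST, SAFE;
        static Mode parse(String s) {
            return valueOf(s.toUpperCase(java.util.Locale.ROOT)); // unknown values throw, failing validation early
        }
    }

    public static final Setting<Mode> MODE_SETTING =
        new Setting<>("index.my_plugin.mode", Mode.SAFE.name(), Mode::parse, true, Setting.Scope.INDEX);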
@@ -38,6 +38,7 @@ import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.cache.RemovalListener;
import org.elasticsearch.common.cache.RemovalNotification;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;

@@ -69,7 +70,7 @@ import java.util.concurrent.Executor;
 */
public final class BitsetFilterCache extends AbstractIndexComponent implements LeafReader.CoreClosedListener, RemovalListener<Object, Cache<Query, BitsetFilterCache.Value>>, Closeable {

    public static final String LOAD_RANDOM_ACCESS_FILTERS_EAGERLY = "index.load_fixed_bitset_filters_eagerly";
    public static final Setting<Boolean> INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING = Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, false, Setting.Scope.INDEX);

    private final boolean loadRandomAccessFiltersEagerly;
    private final Cache<Object, Cache<Query, Value>> loadedFilters;

@@ -82,7 +83,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
        if (listener == null) {
            throw new IllegalArgumentException("listener must not be null");
        }
        this.loadRandomAccessFiltersEagerly = this.indexSettings.getSettings().getAsBoolean(LOAD_RANDOM_ACCESS_FILTERS_EAGERLY, true);
        this.loadRandomAccessFiltersEagerly = this.indexSettings.getValue(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING);
        this.loadedFilters = CacheBuilder.<Object, Cache<Query, Value>>builder().removalListener(this).build();
        this.warmer = new BitSetProducerWarmer();
        this.indicesWarmer = indicesWarmer;
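Unlike the merge-policy settings, `boolSetting` here passes `false` for the dynamic flag: the value is fixed for the life of the index, so it is read once at construction and no update consumer is registered. A short sketch with a hypothetical key, assuming that signature:

    // Hypothetical non-dynamic index setting: dynamic=false means it can only
    // be set at index creation, so it is read once and never re-consumed.
    public static final Setting<Boolean> EAGER_WARMUP_SETTING =
        Setting.boolSetting("index.my_plugin.eager_warmup", true, false, Setting.Scope.INDEX);

    // in a constructor taking IndexSettings:
    // this.eagerWarmup = indexSettings.getValue(EAGER_WARMUP_SETTING);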
@@ -25,6 +25,7 @@ import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.search.QueryCache;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

@@ -38,6 +39,8 @@ import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.Set;

/*
 * Holds all the configuration that is used to create an {@link Engine}.
 * Once {@link Engine} has been created with this object, changes to this

@@ -64,16 +67,30 @@ public final class EngineConfig {
    private final QueryCache queryCache;
    private final QueryCachingPolicy queryCachingPolicy;

    static {

    }
    /**
     * Index setting to change the low level lucene codec used for writing new segments.
     * This setting is <b>not</b> realtime updateable.
     */
    public static final String INDEX_CODEC_SETTING = "index.codec";
    public static final Setting<String> INDEX_CODEC_SETTING = new Setting<>("index.codec", "default", (s) -> {
        switch (s) {
            case "default":
            case "best_compression":
            case "lucene_default":
                return s;
            default:
                if (Codec.availableCodecs().contains(s) == false) { // we don't list the unofficially supported codecs in the error message
                    throw new IllegalArgumentException("unknown value for [index.codec], must be one of [default, best_compression] but was: " + s);
                }
                return s;
        }
    }, false, Setting.Scope.INDEX);

    /** if set to true the engine will start even if the translog id in the commit point can not be found */
    public static final String INDEX_FORCE_NEW_TRANSLOG = "index.engine.force_new_translog";

    private static final String DEFAULT_CODEC_NAME = "default";
    private TranslogConfig translogConfig;
    private boolean create = false;

@@ -97,7 +114,7 @@ public final class EngineConfig {
    this.similarity = similarity;
    this.codecService = codecService;
    this.eventListener = eventListener;
    codecName = settings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
    codecName = indexSettings.getValue(INDEX_CODEC_SETTING);
    // We give IndexWriter a "huge" (256 MB) buffer, so it won't flush on its own unless the ES indexing buffer is also huge and/or
    // there are not too many shards allocated to this node. Instead, IndexingMemoryController periodically checks
    // and refreshes the most heap-consuming shards when total indexing heap usage across all shards is too high:
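INDEX_CODEC_SETTING shows the parser doing double duty as a validator: a bad value now fails on index creation or settings update rather than deep inside the engine. A hedged sketch of a whitelist-validated string setting in the same shape, with a hypothetical key and value set:

    // Hypothetical setting validated at parse time, mirroring INDEX_CODEC_SETTING.
    public static final Setting<String> COMPRESSION_SETTING =
        new Setting<>("index.my_plugin.compression", "default", (s) -> {
            switch (s) {
                case "default":
                case "best_compression":
                    return s;
                default:
                    throw new IllegalArgumentException(
                        "unknown value for [index.my_plugin.compression], must be one of [default, best_compression] but was: " + s);
            }
        }, false, Setting.Scope.INDEX); // dynamic=false: fixed once the index exists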
@@ -23,6 +23,7 @@ import org.apache.lucene.util.Accountable;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.plain.AbstractGeoPointDVIndexFieldData;

@@ -55,9 +56,17 @@ import static java.util.Collections.unmodifiableMap;
/**
 */
public class IndexFieldDataService extends AbstractIndexComponent implements Closeable {

    public static final String FIELDDATA_CACHE_KEY = "index.fielddata.cache";
    public static final String FIELDDATA_CACHE_VALUE_NODE = "node";
    public static final String FIELDDATA_CACHE_KEY = "index.fielddata.cache";
    public static final Setting<String> INDEX_FIELDDATA_CACHE_KEY = new Setting<>(FIELDDATA_CACHE_KEY, (s) -> FIELDDATA_CACHE_VALUE_NODE, (s) -> {
        switch (s) {
            case "node":
            case "none":
                return s;
            default:
                throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node, none]");
        }
    }, false, Setting.Scope.INDEX);

    private static final IndexFieldData.Builder MISSING_DOC_VALUES_BUILDER = (indexProperties, fieldType, cache, breakerService, mapperService1) -> {
        throw new IllegalStateException("Can't load fielddata on [" + fieldType.name()

@@ -228,7 +237,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
        if (cache == null) {
            // we default to node level cache, which in turn defaults to be unbounded
            // this means changing the node level settings is simple, just set the bounds there
            String cacheType = type.getSettings().get("cache", indexSettings.getSettings().get(FIELDDATA_CACHE_KEY, FIELDDATA_CACHE_VALUE_NODE));
            String cacheType = type.getSettings().get("cache", indexSettings.getValue(INDEX_FIELDDATA_CACHE_KEY));
            if (FIELDDATA_CACHE_VALUE_NODE.equals(cacheType)) {
                cache = indicesFieldDataCache.buildIndexFieldDataCache(listener, index(), fieldName, type);
            } else if ("none".equals(cacheType)) {
@@ -27,6 +27,7 @@ import org.apache.lucene.index.IndexOptions;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.analysis.NamedAnalyzer;

@@ -47,7 +48,8 @@ import java.util.Map;
import java.util.stream.StreamSupport;

public abstract class FieldMapper extends Mapper implements Cloneable {

    public static final Setting<Boolean> IGNORE_MALFORMED_SETTING = Setting.boolSetting("index.mapping.ignore_malformed", false, false, Setting.Scope.INDEX);
    public static final Setting<Boolean> COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", false, false, Setting.Scope.INDEX);
    public abstract static class Builder<T extends Builder, Y extends FieldMapper> extends Mapper.Builder<T, Y> {

        protected final MappedFieldType fieldType;
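The mapper hunks further down read these constants via `IGNORE_MALFORMED_SETTING.get(context.indexSettings())`: a registered Setting can be evaluated directly against a raw Settings object, and its declared default applies when the key is absent. A hedged usage sketch, assuming the `Setting#get(Settings)` accessor as it appears in this diff:

    // Evaluating a Setting against a plain Settings object; hypothetical usage.
    Settings settings = Settings.settingsBuilder()
        .put("index.mapping.coerce", true)
        .build();

    boolean coerce = FieldMapper.COERCE_SETTING.get(settings);           // true (explicitly set)
    boolean ignore = FieldMapper.IGNORE_MALFORMED_SETTING.get(settings); // false (declared default)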
@@ -38,6 +38,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;

@@ -94,9 +95,9 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
    }

    public static final String DEFAULT_MAPPING = "_default_";
    public static final String INDEX_MAPPER_DYNAMIC_SETTING = "index.mapper.dynamic";
    public static final String INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = "index.mapping.nested_fields.limit";
    public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, true, Setting.Scope.INDEX);
    public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true;
    public static final Setting<Boolean> INDEX_MAPPER_DYNAMIC_SETTING = Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, false, Setting.Scope.INDEX);
    private static ObjectHashSet<String> META_FIELDS = ObjectHashSet.from(
        "_uid", "_id", "_type", "_all", "_parent", "_routing", "_index",
        "_size", "_timestamp", "_ttl"

@@ -144,7 +145,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
    this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());
    this.mapperRegistry = mapperRegistry;

    this.dynamic = this.indexSettings.getSettings().getAsBoolean(INDEX_MAPPER_DYNAMIC_SETTING, INDEX_MAPPER_DYNAMIC_DEFAULT);
    this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING);
    defaultPercolatorMappingSource = "{\n" +
        "\"_default_\":{\n" +
        "\"properties\" : {\n" +

@@ -419,7 +420,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
    }

    private void checkNestedFieldsLimit(Map<String, ObjectMapper> fullPathObjectMappers) {
        long allowedNestedFields = indexSettings.getSettings().getAsLong(INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING, 50L);
        long allowedNestedFields = indexSettings.getValue(INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING);
        long actualNestedFields = 0;
        for (ObjectMapper objectMapper : fullPathObjectMappers.values()) {
            if (objectMapper.nested().isNested()) {
@@ -32,6 +32,7 @@ import org.apache.lucene.index.IndexableFieldType;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -51,6 +52,7 @@ import java.util.List;
 *
 */
public abstract class NumberFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
    private static final Setting<Boolean> COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", true, false, Setting.Scope.INDEX); // this is private since it has a different default

    public static class Defaults {

@@ -89,7 +91,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
        return new Explicit<>(ignoreMalformed, true);
    }
    if (context.indexSettings() != null) {
        return new Explicit<>(context.indexSettings().getAsBoolean("index.mapping.ignore_malformed", Defaults.IGNORE_MALFORMED.value()), false);
        return new Explicit<>(IGNORE_MALFORMED_SETTING.get(context.indexSettings()), false);
    }
    return Defaults.IGNORE_MALFORMED;
}

@@ -104,7 +106,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
        return new Explicit<>(coerce, true);
    }
    if (context.indexSettings() != null) {
        return new Explicit<>(context.indexSettings().getAsBoolean("index.mapping.coerce", Defaults.COERCE.value()), false);
        return new Explicit<>(COERCE_SETTING.get(context.indexSettings()), false);
    }
    return Defaults.COERCE;
}
@@ -29,6 +29,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -59,7 +60,6 @@ import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
 */
public abstract class BaseGeoPointFieldMapper extends FieldMapper implements ArrayValueMapperParser {
    public static final String CONTENT_TYPE = "geo_point";

    public static class Names {
        public static final String LAT = "lat";
        public static final String LAT_SUFFIX = "." + LAT;

@@ -142,7 +142,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
        return new Explicit<>(ignoreMalformed, true);
    }
    if (context.indexSettings() != null) {
        return new Explicit<>(context.indexSettings().getAsBoolean("index.mapping.ignore_malformed", Defaults.IGNORE_MALFORMED.value()), false);
        return new Explicit<>(IGNORE_MALFORMED_SETTING.get(context.indexSettings()), false);
    }
    return Defaults.IGNORE_MALFORMED;
}
@@ -102,7 +102,7 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
        return new Explicit<>(coerce, true);
    }
    if (context.indexSettings() != null) {
        return new Explicit<>(context.indexSettings().getAsBoolean("index.mapping.coerce", Defaults.COERCE.value()), false);
        return new Explicit<>(COERCE_SETTING.get(context.indexSettings()), false);
    }
    return Defaults.COERCE;
}
@@ -137,7 +137,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
        return new Explicit<>(coerce, true);
    }
    if (context.indexSettings() != null) {
        return new Explicit<>(context.indexSettings().getAsBoolean("index.mapping.coerce", Defaults.COERCE.value()), false);
        return new Explicit<>(COERCE_SETTING.get(context.indexSettings()), false);
    }
    return Defaults.COERCE;
}
@@ -119,7 +119,7 @@ public class PercolatorFieldMapper extends FieldMapper {
    this.queryShardContext = queryShardContext;
    this.queryTermsField = queryTermsField;
    this.unknownQueryField = unknownQueryField;
    this.mapUnmappedFieldAsString = indexSettings.getAsBoolean(PercolatorQueriesRegistry.MAP_UNMAPPED_FIELDS_AS_STRING, false);
    this.mapUnmappedFieldAsString = PercolatorQueriesRegistry.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings);
}

@Override
@@ -30,6 +30,7 @@ import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -60,7 +61,7 @@ import java.util.concurrent.TimeUnit;
 */
public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable {

    public final static String MAP_UNMAPPED_FIELDS_AS_STRING = "index.percolator.map_unmapped_fields_as_string";
    public final static Setting<Boolean> INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING = Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, false, Setting.Scope.INDEX);

    private final ConcurrentMap<BytesRef, Query> percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
    private final QueryShardContext queryShardContext;

@@ -72,7 +73,7 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent
    public PercolatorQueriesRegistry(ShardId shardId, IndexSettings indexSettings, QueryShardContext queryShardContext) {
        super(shardId, indexSettings);
        this.queryShardContext = queryShardContext;
        this.mapUnmappedFieldsAsString = this.indexSettings.getSettings().getAsBoolean(MAP_UNMAPPED_FIELDS_AS_STRING, false);
        this.mapUnmappedFieldsAsString = indexSettings.getValue(INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING);
    }

    public ConcurrentMap<BytesRef, Query> getPercolateQueries() {
@@ -1,198 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.search.stats;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.internal.SearchContext;

import java.util.Locale;
import java.util.concurrent.TimeUnit;

/**
 */
public final class SearchSlowLog {

    private boolean reformat;

    private long queryWarnThreshold;
    private long queryInfoThreshold;
    private long queryDebugThreshold;
    private long queryTraceThreshold;

    private long fetchWarnThreshold;
    private long fetchInfoThreshold;
    private long fetchDebugThreshold;
    private long fetchTraceThreshold;

    private String level;

    private final ESLogger queryLogger;
    private final ESLogger fetchLogger;

    private static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog";
    public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN = INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.warn";
    public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO = INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.info";
    public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG = INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.debug";
    public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE = INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.trace";
    public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN = INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.warn";
    public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO = INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.info";
    public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG = INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.debug";
    public static final String INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE = INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.trace";
    public static final String INDEX_SEARCH_SLOWLOG_REFORMAT = INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat";
    public static final String INDEX_SEARCH_SLOWLOG_LEVEL = INDEX_SEARCH_SLOWLOG_PREFIX + ".level";

    public SearchSlowLog(Settings indexSettings) {

        this.reformat = indexSettings.getAsBoolean(INDEX_SEARCH_SLOWLOG_REFORMAT, true);

        this.queryWarnThreshold = indexSettings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN, TimeValue.timeValueNanos(-1)).nanos();
        this.queryInfoThreshold = indexSettings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO, TimeValue.timeValueNanos(-1)).nanos();
        this.queryDebugThreshold = indexSettings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG, TimeValue.timeValueNanos(-1)).nanos();
        this.queryTraceThreshold = indexSettings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE, TimeValue.timeValueNanos(-1)).nanos();

        this.fetchWarnThreshold = indexSettings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN, TimeValue.timeValueNanos(-1)).nanos();
        this.fetchInfoThreshold = indexSettings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO, TimeValue.timeValueNanos(-1)).nanos();
        this.fetchDebugThreshold = indexSettings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG, TimeValue.timeValueNanos(-1)).nanos();
        this.fetchTraceThreshold = indexSettings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE, TimeValue.timeValueNanos(-1)).nanos();

        this.level = indexSettings.get(INDEX_SEARCH_SLOWLOG_LEVEL, "TRACE").toUpperCase(Locale.ROOT);

        this.queryLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query");
        this.fetchLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch");

        queryLogger.setLevel(level);
        fetchLogger.setLevel(level);
    }

    void onQueryPhase(SearchContext context, long tookInNanos) {
        if (queryWarnThreshold >= 0 && tookInNanos > queryWarnThreshold) {
            queryLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (queryInfoThreshold >= 0 && tookInNanos > queryInfoThreshold) {
            queryLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (queryDebugThreshold >= 0 && tookInNanos > queryDebugThreshold) {
            queryLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (queryTraceThreshold >= 0 && tookInNanos > queryTraceThreshold) {
            queryLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        }
    }

    void onFetchPhase(SearchContext context, long tookInNanos) {
        if (fetchWarnThreshold >= 0 && tookInNanos > fetchWarnThreshold) {
            fetchLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (fetchInfoThreshold >= 0 && tookInNanos > fetchInfoThreshold) {
            fetchLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (fetchDebugThreshold >= 0 && tookInNanos > fetchDebugThreshold) {
            fetchLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        } else if (fetchTraceThreshold >= 0 && tookInNanos > fetchTraceThreshold) {
            fetchLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
        }
    }

    public void onRefreshSettings(Settings settings) {
        long queryWarnThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN, TimeValue.timeValueNanos(this.queryWarnThreshold)).nanos();
        if (queryWarnThreshold != this.queryWarnThreshold) {
            this.queryWarnThreshold = queryWarnThreshold;
        }
        long queryInfoThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO, TimeValue.timeValueNanos(this.queryInfoThreshold)).nanos();
        if (queryInfoThreshold != this.queryInfoThreshold) {
            this.queryInfoThreshold = queryInfoThreshold;
        }
        long queryDebugThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG, TimeValue.timeValueNanos(this.queryDebugThreshold)).nanos();
        if (queryDebugThreshold != this.queryDebugThreshold) {
            this.queryDebugThreshold = queryDebugThreshold;
        }
        long queryTraceThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE, TimeValue.timeValueNanos(this.queryTraceThreshold)).nanos();
        if (queryTraceThreshold != this.queryTraceThreshold) {
            this.queryTraceThreshold = queryTraceThreshold;
        }

        long fetchWarnThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN, TimeValue.timeValueNanos(this.fetchWarnThreshold)).nanos();
        if (fetchWarnThreshold != this.fetchWarnThreshold) {
            this.fetchWarnThreshold = fetchWarnThreshold;
        }
        long fetchInfoThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO, TimeValue.timeValueNanos(this.fetchInfoThreshold)).nanos();
        if (fetchInfoThreshold != this.fetchInfoThreshold) {
            this.fetchInfoThreshold = fetchInfoThreshold;
        }
        long fetchDebugThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG, TimeValue.timeValueNanos(this.fetchDebugThreshold)).nanos();
        if (fetchDebugThreshold != this.fetchDebugThreshold) {
            this.fetchDebugThreshold = fetchDebugThreshold;
        }
        long fetchTraceThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE, TimeValue.timeValueNanos(this.fetchTraceThreshold)).nanos();
        if (fetchTraceThreshold != this.fetchTraceThreshold) {
            this.fetchTraceThreshold = fetchTraceThreshold;
        }

        String level = settings.get(INDEX_SEARCH_SLOWLOG_LEVEL, this.level);
        if (!level.equals(this.level)) {
            this.queryLogger.setLevel(level.toUpperCase(Locale.ROOT));
            this.fetchLogger.setLevel(level.toUpperCase(Locale.ROOT));
            this.level = level;
        }

        boolean reformat = settings.getAsBoolean(INDEX_SEARCH_SLOWLOG_REFORMAT, this.reformat);
        if (reformat != this.reformat) {
            this.reformat = reformat;
        }
    }

    private static class SlowLogSearchContextPrinter {
        private final SearchContext context;
        private final long tookInNanos;
        private final boolean reformat;

        public SlowLogSearchContextPrinter(SearchContext context, long tookInNanos, boolean reformat) {
            this.context = context;
            this.tookInNanos = tookInNanos;
            this.reformat = reformat;
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], ");
            if (context.types() == null) {
                sb.append("types[], ");
            } else {
                sb.append("types[");
                Strings.arrayToDelimitedString(context.types(), ",", sb);
                sb.append("], ");
            }
            if (context.groupStats() == null) {
                sb.append("stats[], ");
            } else {
                sb.append("stats[");
                Strings.collectionToDelimitedString(context.groupStats(), ",", "", "", sb);
                sb.append("], ");
            }
            sb.append("search_type[").append(context.searchType()).append("], total_shards[").append(context.numberOfShards()).append("], ");
            if (context.request().source() != null) {
                sb.append("source[").append(context.request().source()).append("], ");
            } else {
                sb.append("source[], ");
            }
            return sb.toString();
        }
    }
}
@@ -24,6 +24,7 @@ import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.SearchSlowLog;
import org.elasticsearch.search.internal.SearchContext;

import java.util.HashMap;

@@ -179,10 +180,6 @@ public final class ShardSearchStats {
        totalStats.scrollMetric.inc(System.nanoTime() - context.getOriginNanoTime());
    }

    public void onRefreshSettings(Settings settings) {
        slowLogSearchService.onRefreshSettings(settings);
    }

    final static class StatsHolder {
        public final MeanMetric queryMetric = new MeanMetric();
        public final MeanMetric fetchMetric = new MeanMetric();
@@ -91,7 +91,7 @@ import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchSlowLog;
import org.elasticsearch.index.SearchSlowLog;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.search.stats.ShardSearchStats;
import org.elasticsearch.index.similarity.SimilarityService;

@@ -237,13 +237,13 @@ public class IndexShard extends AbstractIndexShardComponent {
    /* create engine config */
    logger.debug("state: [CREATED]");

    this.checkIndexOnStartup = settings.get("index.shard.check_on_startup", "false");
    this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP);
    this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings,
        provider.getBigArrays());
    final QueryCachingPolicy cachingPolicy;
    // the query cache is a node-level thing, however we want the most popular filters
    // to be computed on a per-shard basis
    if (settings.getAsBoolean(IndexModule.QUERY_CACHE_EVERYTHING, false)) {
    if (IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.get(settings)) {
        cachingPolicy = QueryCachingPolicy.ALWAYS_CACHE;
    } else {
        cachingPolicy = new UsageTrackingQueryCachingPolicy();

@@ -1208,7 +1208,7 @@ public class IndexShard extends AbstractIndexShardComponent {
    BytesStreamOutput os = new BytesStreamOutput();
    PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());

    if ("checksum".equalsIgnoreCase(checkIndexOnStartup)) {
    if ("checksum".equals(checkIndexOnStartup)) {
        // physical verification only: verify all checksums for the latest commit
        IOException corrupt = null;
        MetadataSnapshot metadata = store.getMetadata();

@@ -1240,7 +1240,7 @@ public class IndexShard extends AbstractIndexShardComponent {
        return;
    }
    logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
    if ("fix".equalsIgnoreCase(checkIndexOnStartup)) {
    if ("fix".equals(checkIndexOnStartup)) {
        if (logger.isDebugEnabled()) {
            logger.debug("fixing index, writing new segments file ...");
        }
@@ -29,7 +29,7 @@ import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.search.stats.SearchSlowLog;
import org.elasticsearch.index.SearchSlowLog;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.TranslogStats;
@ -33,6 +33,7 @@ import org.apache.lucene.store.StoreRateLimiting;
|
|||
import org.apache.lucene.util.Constants;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.metrics.CounterMetric;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.index.IndexModule;
|
||||
|
@ -50,7 +51,16 @@ import java.util.Set;
|
|||
public class FsDirectoryService extends DirectoryService implements StoreRateLimiting.Listener, StoreRateLimiting.Provider {
|
||||
|
||||
protected final IndexStore indexStore;
|
||||
|
||||
public static final Setting<LockFactory> INDEX_LOCK_FACTOR_SETTING = new Setting<>("index.store.fs.fs_lock", "native", (s) -> {
|
||||
switch (s) {
|
||||
case "native":
|
||||
return NativeFSLockFactory.INSTANCE;
|
||||
case "simple":
|
||||
return SimpleFSLockFactory.INSTANCE;
|
||||
default:
|
||||
throw new IllegalArgumentException("unrecognized [index.store.fs.fs_lock] \"" + s + "\": must be native or simple");
|
||||
}
|
||||
}, false, Setting.Scope.INDEX);
|
||||
private final CounterMetric rateLimitingTimeInNanos = new CounterMetric();
|
||||
private final ShardPath path;
|
||||
|
||||
|
@ -71,29 +81,11 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim
|
|||
return indexStore.rateLimiting();
|
||||
}
|
||||
|
||||
public static LockFactory buildLockFactory(IndexSettings indexSettings) {
|
||||
final Settings settings = indexSettings.getSettings();
|
||||
String fsLock = settings.get("index.store.fs.lock", settings.get("index.store.fs.fs_lock", "native"));
|
||||
LockFactory lockFactory;
|
||||
if (fsLock.equals("native")) {
|
||||
lockFactory = NativeFSLockFactory.INSTANCE;
|
||||
} else if (fsLock.equals("simple")) {
|
||||
lockFactory = SimpleFSLockFactory.INSTANCE;
|
||||
} else {
|
||||
throw new IllegalArgumentException("unrecognized fs_lock \"" + fsLock + "\": must be native or simple");
|
||||
}
|
||||
return lockFactory;
|
||||
}
|
||||
|
||||
protected final LockFactory buildLockFactory() throws IOException {
|
||||
return buildLockFactory(indexSettings);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Directory newDirectory() throws IOException {
|
||||
final Path location = path.resolveIndex();
|
||||
Files.createDirectories(location);
|
||||
Directory wrapped = newFSDirectory(location, buildLockFactory());
|
||||
Directory wrapped = newFSDirectory(location, indexSettings.getValue(INDEX_LOCK_FACTOR_SETTING));
|
||||
return new RateLimitedFSDirectory(wrapped, this, this) ;
|
||||
}
|
||||
|
||||
|
@ -112,7 +104,7 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim
|
|||
|
||||
|
||||
protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException {
|
||||
final String storeType = indexSettings.getSettings().get(IndexModule.STORE_TYPE, IndexModule.Type.DEFAULT.getSettingsKey());
|
||||
final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.DEFAULT.getSettingsKey());
|
||||
if (IndexModule.Type.FS.match(storeType) || IndexModule.Type.DEFAULT.match(storeType)) {
|
||||
final FSDirectory open = FSDirectory.open(location, lockFactory); // use lucene defaults
|
||||
if (open instanceof MMapDirectory && Constants.WINDOWS == false) {
|
||||
|
|
|
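The new INDEX_LOCK_FACTOR_SETTING above bundles its parser with the setting, so an invalid value fails during validation rather than deep inside directory construction. A sketch of the same custom-parser idiom with hypothetical names:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    public class ParsedSettingExample {
        // hypothetical setting whose parser converts the string and rejects anything else
        public static final Setting<Integer> EXAMPLE_MODE_SETTING = new Setting<>("index.example.mode", "one", (s) -> {
            switch (s) {
                case "one":
                    return 1;
                case "two":
                    return 2;
                default:
                    throw new IllegalArgumentException("unrecognized [index.example.mode] \"" + s + "\": must be one or two");
            }
        }, false, Setting.Scope.INDEX);

        public static void main(String[] args) {
            // an invalid value now fails fast instead of being silently accepted
            EXAMPLE_MODE_SETTING.get(Settings.builder().put("index.example.mode", "three").build());
        }
    }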
@@ -20,6 +20,7 @@
package org.elasticsearch.index.store;

import org.apache.lucene.store.StoreRateLimiting;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.AbstractIndexComponent;
@@ -29,32 +30,18 @@ import org.elasticsearch.index.shard.ShardPath;
*
*/
public class IndexStore extends AbstractIndexComponent {

public static final String INDEX_STORE_THROTTLE_TYPE = "index.store.throttle.type";
public static final String INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC = "index.store.throttle.max_bytes_per_sec";
public static final Setting<StoreRateLimiting.Type> INDEX_STORE_THROTTLE_TYPE_SETTING = new Setting<>("index.store.throttle.type", "none", StoreRateLimiting.Type::fromString, true, Setting.Scope.INDEX);
public static final Setting<ByteSizeValue> INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("index.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.INDEX);

protected final IndexStoreConfig indexStoreConfig;
private volatile String rateLimitingType;
private volatile ByteSizeValue rateLimitingThrottle;
private volatile boolean nodeRateLimiting;

private final StoreRateLimiting rateLimiting = new StoreRateLimiting();

public IndexStore(IndexSettings indexSettings, IndexStoreConfig indexStoreConfig) {
super(indexSettings);
this.indexStoreConfig = indexStoreConfig;

this.rateLimitingType = indexSettings.getSettings().get(INDEX_STORE_THROTTLE_TYPE, "none");
if (rateLimitingType.equalsIgnoreCase("node")) {
nodeRateLimiting = true;
} else {
nodeRateLimiting = false;
rateLimiting.setType(rateLimitingType);
}
this.rateLimitingThrottle = indexSettings.getSettings().getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(0));
rateLimiting.setMaxRate(rateLimitingThrottle);

logger.debug("using index.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle);
rateLimiting.setType(indexSettings.getValue(INDEX_STORE_THROTTLE_TYPE_SETTING));
rateLimiting.setMaxRate(indexSettings.getValue(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING));
logger.debug("using index.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimiting.getType(), rateLimiting.getRateLimiter());
}

/**
@@ -62,7 +49,7 @@ public class IndexStore extends AbstractIndexComponent {
* the node level one (defaults to the node level one).
*/
public StoreRateLimiting rateLimiting() {
return nodeRateLimiting ? indexStoreConfig.getNodeRateLimiter() : this.rateLimiting;
return rateLimiting.getType() == StoreRateLimiting.Type.NONE ? indexStoreConfig.getNodeRateLimiter() : this.rateLimiting;
}

/**
@@ -72,26 +59,11 @@ public class IndexStore extends AbstractIndexComponent {
return new FsDirectoryService(indexSettings, this, path);
}

public void onRefreshSettings(Settings settings) {
String rateLimitingType = settings.get(INDEX_STORE_THROTTLE_TYPE, IndexStore.this.rateLimitingType);
if (!rateLimitingType.equals(IndexStore.this.rateLimitingType)) {
logger.info("updating index.store.throttle.type from [{}] to [{}]", IndexStore.this.rateLimitingType, rateLimitingType);
if (rateLimitingType.equalsIgnoreCase("node")) {
IndexStore.this.rateLimitingType = rateLimitingType;
IndexStore.this.nodeRateLimiting = true;
} else {
StoreRateLimiting.Type.fromString(rateLimitingType);
IndexStore.this.rateLimitingType = rateLimitingType;
IndexStore.this.nodeRateLimiting = false;
IndexStore.this.rateLimiting.setType(rateLimitingType);
}
public void setType(StoreRateLimiting.Type type) {
rateLimiting.setType(type);
}

ByteSizeValue rateLimitingThrottle = settings.getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, IndexStore.this.rateLimitingThrottle);
if (!rateLimitingThrottle.equals(IndexStore.this.rateLimitingThrottle)) {
logger.info("updating index.store.throttle.max_bytes_per_sec from [{}] to [{}], note, type is [{}]", IndexStore.this.rateLimitingThrottle, rateLimitingThrottle, IndexStore.this.rateLimitingType);
IndexStore.this.rateLimitingThrottle = rateLimitingThrottle;
IndexStore.this.rateLimiting.setMaxRate(rateLimitingThrottle);
}
public void setMaxRate(ByteSizeValue rate) {
rateLimiting.setMaxRate(rate);
}
}
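Both throttle settings are declared with the dynamic flag set to true, which is what allows them to be changed on a live index; parsing of values like "20mb" is handled by the setting itself. An illustrative sketch with a hypothetical key:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.ByteSizeValue;

    public class ByteSizeSettingExample {
        // hypothetical dynamic, index-scoped byte-size setting
        public static final Setting<ByteSizeValue> EXAMPLE_RATE_SETTING =
            Setting.byteSizeSetting("index.example.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.INDEX);

        public static void main(String[] args) {
            Settings settings = Settings.builder().put(EXAMPLE_RATE_SETTING.getKey(), "20mb").build();
            ByteSizeValue rate = EXAMPLE_RATE_SETTING.get(settings); // "20mb" parsed into a ByteSizeValue
            System.out.println(rate);
        }
    }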
@@ -61,6 +61,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback;
@@ -81,6 +82,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.sql.Time;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
@@ -126,7 +128,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
static final int VERSION_START = 0;
static final int VERSION = VERSION_WRITE_THROWABLE;
static final String CORRUPTED = "corrupted_";
public static final String INDEX_STORE_STATS_REFRESH_INTERVAL = "index.store.stats_refresh_interval";
public static final Setting<TimeValue> INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.store.stats_refresh_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.INDEX);

private final AtomicBoolean isClosed = new AtomicBoolean(false);
private final StoreDirectory directory;
@@ -154,7 +156,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
this.directory = new StoreDirectory(directoryService.newDirectory(), Loggers.getLogger("index.store.deletes", settings, shardId));
this.shardLock = shardLock;
this.onClose = onClose;
final TimeValue refreshInterval = settings.getAsTime(INDEX_STORE_STATS_REFRESH_INTERVAL, TimeValue.timeValueSeconds(10));
final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING);
this.statsCache = new StoreStatsCache(refreshInterval, directory, directoryService);
logger.debug("store stats are refreshed with refresh_interval [{}]", refreshInterval);
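Setting.timeSetting is the duration counterpart used above for index.store.stats_refresh_interval: the default and the parsing live on the setting rather than at each call site. A sketch with a hypothetical key:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.TimeValue;

    public class TimeSettingExample {
        // hypothetical non-dynamic, index-scoped duration setting
        public static final Setting<TimeValue> EXAMPLE_INTERVAL_SETTING =
            Setting.timeSetting("index.example.stats_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.INDEX);

        public static void main(String[] args) {
            TimeValue interval = EXAMPLE_INTERVAL_SETTING.get(
                Settings.builder().put(EXAMPLE_INTERVAL_SETTING.getKey(), "30s").build());
            System.out.println(interval); // 30s, parsed once by the setting
        }
    }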
@@ -1,51 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.store;

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.index.shard.ShardPath;

/**
*
*/
public class StoreModule extends AbstractModule {
private final ShardLock lock;
private final Store.OnClose closeCallback;
private final ShardPath path;
private final Class<? extends DirectoryService> shardDirectory;

public StoreModule(Class<? extends DirectoryService> shardDirectory, ShardLock lock, Store.OnClose closeCallback, ShardPath path) {
this.shardDirectory = shardDirectory;
this.lock = lock;
this.closeCallback = closeCallback;
this.path = path;
}

@Override
protected void configure() {
bind(DirectoryService.class).to(shardDirectory).asEagerSingleton();
bind(Store.class).asEagerSingleton();
bind(ShardLock.class).toInstance(lock);
bind(Store.OnClose.class).toInstance(closeCallback);
bind(ShardPath.class).toInstance(path);
}
}

@@ -77,6 +77,7 @@ import java.util.Map;
* Configures classes and services that are shared by indices on each node.
*/
public class IndicesModule extends AbstractModule {

private final Map<String, Mapper.TypeParser> mapperParsers = new LinkedHashMap<>();
// Use a LinkedHashMap for metadataMappers because iteration order matters

@@ -37,6 +37,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
@@ -70,7 +71,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -101,6 +101,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
private final IndicesQueriesRegistry indicesQueriesRegistry;
private final ClusterService clusterService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final IndexScopedSettings indexScopeSetting;
private volatile Map<String, IndexService> indices = emptyMap();
private final Map<Index, List<PendingDelete>> pendingDeletes = new HashMap<>();
private final OldShardsStats oldShardsStats = new OldShardsStats();
@@ -116,7 +117,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv,
ClusterSettings clusterSettings, AnalysisRegistry analysisRegistry,
IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver,
ClusterService clusterService, MapperRegistry mapperRegistry, ThreadPool threadPool) {
ClusterService clusterService, MapperRegistry mapperRegistry, ThreadPool threadPool, IndexScopedSettings indexScopedSettings) {
super(settings);
this.pluginsService = pluginsService;
this.nodeEnv = nodeEnv;
@@ -130,6 +131,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType);
clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle);
indexingMemoryController = new IndexingMemoryController(settings, threadPool, this);
this.indexScopeSetting = indexScopedSettings;
}

@Override
@@ -280,7 +282,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
}
final String indexName = indexMetaData.getIndex();
final Predicate<String> indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(indexName, indexExpression, clusterService.state());
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, Collections.emptyList(), indexNameMatcher);
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting);
Index index = new Index(indexMetaData.getIndex());
if (indices.containsKey(index.name())) {
throw new IndexAlreadyExistsException(index);
@@ -570,7 +572,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
// play safe here and make sure that we take node level settings into account.
// we might run on nodes where we use shard FS and then in the future don't delete
// actual content.
return new IndexSettings(metaData, settings, Collections.emptyList());
return new IndexSettings(metaData, settings);
}

/**
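The constructor above registers update consumers so that a dynamic setting change arrives as an already-parsed value. A hedged sketch of that wiring, with a hypothetical setting and component:

    import org.elasticsearch.common.settings.ClusterSettings;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.TimeValue;

    public class UpdateConsumerExample {
        // hypothetical dynamic, cluster-scoped setting
        public static final Setting<TimeValue> EXAMPLE_TIMEOUT_SETTING =
            Setting.positiveTimeSetting("cluster.example.timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);

        private volatile TimeValue timeout;

        public UpdateConsumerExample(Settings nodeSettings, ClusterSettings clusterSettings) {
            this.timeout = EXAMPLE_TIMEOUT_SETTING.get(nodeSettings); // initial parsed value
            // invoked with the new parsed value whenever the setting is updated dynamically
            clusterSettings.addSettingsUpdateConsumer(EXAMPLE_TIMEOUT_SETTING, this::setTimeout);
        }

        private void setTimeout(TimeValue timeout) {
            this.timeout = timeout;
        }
    }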
@@ -39,8 +39,6 @@ import java.util.concurrent.TimeUnit;
*/
public final class IndicesWarmer extends AbstractComponent {

public static final String INDEX_WARMER_ENABLED = "index.warmer.enabled";

private final ThreadPool threadPool;

private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<>();
@@ -62,8 +60,7 @@ public final class IndicesWarmer extends AbstractComponent {
if (shard.state() == IndexShardState.CLOSED) {
return;
}
final Settings indexSettings = settings.getSettings();
if (!indexSettings.getAsBoolean(INDEX_WARMER_ENABLED, settings.getNodeSettings().getAsBoolean(INDEX_WARMER_ENABLED, true))) {
if (settings.isWarmerEnabled() == false) {
return;
}
if (logger.isTraceEnabled()) {

@@ -78,7 +78,7 @@ public final class AnalysisModule extends AbstractModule {
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.build();
IndexMetaData metaData = IndexMetaData.builder("_na_").settings(build).build();
NA_INDEX_SETTINGS = new IndexSettings(metaData, Settings.EMPTY, Collections.emptyList());
NA_INDEX_SETTINGS = new IndexSettings(metaData, Settings.EMPTY);
}
private static final IndexSettings NA_INDEX_SETTINGS;
private final Environment environment;

@@ -38,6 +38,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.MemorySizeValue;
import org.elasticsearch.common.unit.TimeValue;
@@ -78,7 +79,7 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
* A setting to enable or disable request caching on an index level. It's dynamic by default
* since we are checking on the cluster state IndexMetaData always.
*/
public static final String INDEX_CACHE_REQUEST_ENABLED = "index.requests.cache.enable";
public static final Setting<Boolean> INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable", true, true, Setting.Scope.INDEX);
public static final String INDICES_CACHE_REQUEST_CLEAN_INTERVAL = "indices.requests.cache.clean_interval";

public static final String INDICES_CACHE_QUERY_SIZE = "indices.requests.cache.size";
@@ -118,9 +119,6 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper);
}

private boolean isCacheEnabled(Settings settings, boolean defaultEnable) {
return settings.getAsBoolean(INDEX_CACHE_REQUEST_ENABLED, defaultEnable);
}

private void buildCache() {
long sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size, INDICES_CACHE_QUERY_SIZE).bytes();
@@ -182,7 +180,7 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
}
// if not explicitly set in the request, use the index setting, if not, use the request
if (request.requestCache() == null) {
if (!isCacheEnabled(index.getSettings(), Boolean.FALSE)) {
if (INDEX_CACHE_REQUEST_ENABLED_SETTING.get(index.getSettings()) == false) {
return false;
}
} else if (!request.requestCache()) {
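Note that the removed isCacheEnabled helper existed only to supply a default; with a registered setting, the default is declared once on the Setting, and get(settings) falls back to it when the key is absent. For illustration (hypothetical key):

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    public class DefaultFallbackExample {
        static final Setting<Boolean> EXAMPLE_ENABLED_SETTING =
            Setting.boolSetting("index.example.enable", true, true, Setting.Scope.INDEX);

        public static void main(String[] args) {
            // key absent: the setting's declared default (true) is returned
            System.out.println(EXAMPLE_ENABLED_SETTING.get(Settings.EMPTY));
            // key present: the stored value wins
            System.out.println(EXAMPLE_ENABLED_SETTING.get(
                Settings.builder().put(EXAMPLE_ENABLED_SETTING.getKey(), false).build()));
        }
    }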
@@ -257,7 +257,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} else {
final IndexMetaData metaData = previousState.metaData().index(index);
assert metaData != null;
indexSettings = new IndexSettings(metaData, settings, Collections.emptyList());
indexSettings = new IndexSettings(metaData, settings);
indicesService.deleteClosedIndex("closed index no longer part of the metadata", metaData, event.state());
}
try {

@@ -111,7 +111,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
}

for (IndexRoutingTable indexRoutingTable : event.state().routingTable()) {
IndexSettings indexSettings = new IndexSettings(event.state().getMetaData().index(indexRoutingTable.index()), settings, Collections.emptyList());
IndexSettings indexSettings = new IndexSettings(event.state().getMetaData().index(indexRoutingTable.index()), settings);
// Note, closed indices will not have any routing information, so won't be deleted
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
if (shardCanBeDeleted(event.state(), indexShardRoutingTable)) {

@@ -34,6 +34,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -171,11 +172,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
if (metaData == null) {
return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
}
String storeType = metaData.getSettings().get(IndexModule.STORE_TYPE, "fs");
if (!storeType.contains("fs")) {
return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
}
final IndexSettings indexSettings = indexService != null ? indexService.getIndexSettings() : new IndexSettings(metaData, settings, Collections.emptyList());
final IndexSettings indexSettings = indexService != null ? indexService.getIndexSettings() : new IndexSettings(metaData, settings);
final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
if (shardPath == null) {
return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);

@@ -68,7 +68,6 @@ import java.util.concurrent.locks.ReentrantLock;
public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLService> {

public static final Setting<TimeValue> INDICES_TTL_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER);
public static final String INDEX_TTL_DISABLE_PURGE = "index.ttl.disable_purge";

private final ClusterService clusterService;
private final IndicesService indicesService;
@@ -164,8 +163,7 @@ public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLServ
if (indexMetaData == null) {
continue;
}
boolean disablePurge = indexMetaData.getSettings().getAsBoolean(INDEX_TTL_DISABLE_PURGE, false);
if (disablePurge) {
if (indexService.getIndexSettings().isTTLPurgeDisabled()) {
continue;
}

@@ -58,7 +58,7 @@ public class RestClusterGetSettingsAction extends BaseRestHandler {
ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest()
.routingTable(false)
.nodes(false);
final boolean renderDefaults = request.paramAsBoolean("defaults", false);
final boolean renderDefaults = request.paramAsBoolean("include_defaults", false);
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
client.admin().cluster().state(clusterStateRequest, new RestBuilderListener<ClusterStateResponse>(channel) {
@Override

@@ -29,6 +29,7 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -52,9 +53,12 @@ import static org.elasticsearch.rest.RestStatus.OK;
*/
public class RestGetIndicesAction extends BaseRestHandler {

private final IndexScopedSettings indexScopedSettings;

@Inject
public RestGetIndicesAction(Settings settings, RestController controller, Client client) {
public RestGetIndicesAction(Settings settings, RestController controller, Client client, IndexScopedSettings indexScopedSettings) {
super(settings, controller, client);
this.indexScopedSettings = indexScopedSettings;
controller.registerHandler(GET, "/{index}", this);
controller.registerHandler(GET, "/{index}/{type}", this);
}
@@ -133,9 +137,15 @@ public class RestGetIndicesAction extends BaseRestHandler {
}

private void writeSettings(Settings settings, XContentBuilder builder, Params params) throws IOException {
final boolean renderDefaults = request.paramAsBoolean("include_defaults", false);
builder.startObject(Fields.SETTINGS);
settings.toXContent(builder, params);
builder.endObject();
if (renderDefaults) {
builder.startObject("defaults");
indexScopedSettings.diff(settings, settings).toXContent(builder, request);
builder.endObject();
}
}

});
@@ -145,7 +155,6 @@ public class RestGetIndicesAction extends BaseRestHandler {
static final XContentBuilderString ALIASES = new XContentBuilderString("aliases");
static final XContentBuilderString MAPPINGS = new XContentBuilderString("mappings");
static final XContentBuilderString SETTINGS = new XContentBuilderString("settings");
static final XContentBuilderString WARMERS = new XContentBuilderString("warmers");
}

}

@@ -26,9 +26,9 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
@@ -42,9 +42,12 @@ import static org.elasticsearch.rest.RestStatus.OK;

public class RestGetSettingsAction extends BaseRestHandler {

private final IndexScopedSettings indexScopedSettings;

@Inject
public RestGetSettingsAction(Settings settings, RestController controller, Client client) {
public RestGetSettingsAction(Settings settings, RestController controller, Client client, IndexScopedSettings indexScopedSettings) {
super(settings, controller, client);
this.indexScopedSettings = indexScopedSettings;
controller.registerHandler(GET, "/{index}/_settings/{name}", this);
controller.registerHandler(GET, "/_settings/{name}", this);
controller.registerHandler(GET, "/{index}/_setting/{name}", this);
@@ -53,6 +56,7 @@ public class RestGetSettingsAction extends BaseRestHandler {
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
final String[] names = request.paramAsStringArrayOrEmptyIfAll("name");
final boolean renderDefaults = request.paramAsBoolean("include_defaults", false);
GetSettingsRequest getSettingsRequest = new GetSettingsRequest()
.indices(Strings.splitStringByCommaToArray(request.param("index")))
.indicesOptions(IndicesOptions.fromRequest(request, IndicesOptions.strictExpandOpen()))
@@ -71,9 +75,14 @@ public class RestGetSettingsAction extends BaseRestHandler {
continue;
}
builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE);
builder.startObject(Fields.SETTINGS);
builder.startObject("settings");
cursor.value.toXContent(builder, request);
builder.endObject();
if (renderDefaults) {
builder.startObject("defaults");
indexScopedSettings.diff(cursor.value, settings).toXContent(builder, request);
builder.endObject();
}
builder.endObject();
}
builder.endObject();
@@ -81,8 +90,4 @@ public class RestGetSettingsAction extends BaseRestHandler {
}
});
}

static class Fields {
static final XContentBuilderString SETTINGS = new XContentBuilderString("settings");
}
}
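Both REST handlers now accept an include_defaults flag and, when it is set, emit a second defaults object built from indexScopedSettings.diff(...), covering registered settings that are not explicitly present. Assuming the routes registered above, a request exercising this would look roughly like:

    GET /{index}/_settings?include_defaults=true

The response then carries the explicit "settings" object followed by a "defaults" object per index, mirroring the builder calls in the handler.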
@@ -121,7 +121,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
*/
public class SearchService extends AbstractLifecycleComponent<SearchService> implements IndexEventListener {

public static final String NORMS_LOADING_KEY = "index.norms.loading";
public static final Setting<Loading> INDEX_NORMS_LOADING_SETTING = new Setting<>("index.norms.loading", Loading.LAZY.toString(), (s) -> Loading.parse(s, Loading.LAZY), false, Setting.Scope.INDEX);
public static final String DEFAULT_KEEPALIVE_KEY = "search.default_keep_alive";
public static final String KEEPALIVE_INTERVAL_KEY = "search.keep_alive_interval";

@@ -962,7 +962,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
}
@Override
public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
final Loading defaultLoading = Loading.parse(indexShard.getIndexSettings().getSettings().get(NORMS_LOADING_KEY), Loading.LAZY);
final Loading defaultLoading = indexShard.indexSettings().getValue(INDEX_NORMS_LOADING_SETTING);
final MapperService mapperService = indexShard.mapperService();
final ObjectSet<String> warmUp = new ObjectHashSet<>();
for (DocumentMapper docMapper : mapperService.docMappers(false)) {

@@ -38,6 +38,7 @@ import org.elasticsearch.common.lucene.search.function.WeightFactorFunction;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.engine.Engine;
@@ -78,20 +79,6 @@ import java.util.Map;
*
*/
public class DefaultSearchContext extends SearchContext {
/**
* Index setting describing the maximum value of from + size on a query.
*/
public static final String MAX_RESULT_WINDOW = "index.max_result_window";
public static class Defaults {
/**
* Default maximum value of from + size on a query. 10,000 was chosen as
* a conservative default as it is sure to not cause trouble. Users can
* certainly profile their cluster and decide to set it to 100,000
* safely. 1,000,000 is probably way too high for any cluster to set
* safely.
*/
public static final int MAX_RESULT_WINDOW = 10000;
}

private final long id;
private final ShardSearchRequest request;
@@ -202,13 +189,13 @@ public class DefaultSearchContext extends SearchContext {
long resultWindow = from + size;
// We need settingsService's view of the settings because it's dynamic.
// indexService's isn't.
int maxResultWindow = indexService.getIndexSettings().getSettings().getAsInt(MAX_RESULT_WINDOW, Defaults.MAX_RESULT_WINDOW);
int maxResultWindow = indexService.getIndexSettings().getMaxResultWindow();

if (resultWindow > maxResultWindow) {
throw new QueryPhaseExecutionException(this,
"Result window is too large, from + size must be less than or equal to: [" + maxResultWindow + "] but was ["
+ resultWindow + "]. See the scroll api for a more efficient way to request large data sets. "
+ "This limit can be set by changing the [" + DefaultSearchContext.MAX_RESULT_WINDOW
+ "This limit can be set by changing the [" + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey()
+ "] index level parameter.");
}
}
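The result-window check now reads the typed value via getMaxResultWindow(), and the error message points at IndexSettings.MAX_RESULT_WINDOW_SETTING. Assuming the setting remains dynamically updatable, as the removed comment above indicates it was, raising it per index from an integration-test context would look roughly like this sketch (index name hypothetical):

    // inside an ESIntegTestCase, mirroring the update-settings calls used by the tests below
    client().admin().indices().prepareUpdateSettings("my_index")
        .setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), 50000))
        .get();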
@@ -130,7 +130,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
public void testValuesSmokeScreen() throws IOException {
internalCluster().ensureAtMostNumDataNodes(5);
internalCluster().ensureAtLeastNumDataNodes(1);
assertAcked(prepareCreate("test1").setSettings(settingsBuilder().put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0).build()));
assertAcked(prepareCreate("test1").setSettings(settingsBuilder().put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0).build()));
index("test1", "type", "1", "f", "f");
/*
* Ensure at least one shard is allocated otherwise the FS stats might

@@ -52,19 +52,13 @@ import static org.hamcrest.core.IsNull.notNullValue;

@ClusterScope(scope = Scope.TEST)
public class CreateIndexIT extends ESIntegTestCase {
public void testCreationDateGiven() {
public void testCreationDateGivenFails() {
try {
prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 4l)).get();
ClusterStateResponse response = client().admin().cluster().prepareState().get();
ClusterState state = response.getState();
assertThat(state, notNullValue());
MetaData metadata = state.getMetaData();
assertThat(metadata, notNullValue());
ImmutableOpenMap<String, IndexMetaData> indices = metadata.getIndices();
assertThat(indices, notNullValue());
assertThat(indices.size(), equalTo(1));
IndexMetaData index = indices.get("test");
assertThat(index, notNullValue());
assertThat(index.getCreationDate(), equalTo(4l));
fail();
} catch (IllegalArgumentException ex) {
assertEquals("unknown setting [index.creation_date]", ex.getMessage());
}
}

public void testCreationDateGenerated() {
@@ -112,41 +106,27 @@ public class CreateIndexIT extends ESIntegTestCase {
}

public void testInvalidShardCountSettings() throws Exception {
int value = randomIntBetween(-10, 0);
try {
prepareCreate("test").setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0))
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, value)
.build())
.get();
fail("should have thrown an exception about the primary shard count");
} catch (IllegalArgumentException e) {
assertThat("message contains error about shard count: " + e.getMessage(),
e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true));
assertEquals("Failed to parse value [" + value + "] for setting [index.number_of_shards] must be >= 1", e.getMessage());
}

value = randomIntBetween(-10, -1);
try {
prepareCreate("test").setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, value)
.build())
.get();
fail("should have thrown an exception about the replica shard count");
} catch (IllegalArgumentException e) {
assertThat("message contains error about shard count: " + e.getMessage(),
e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true));
assertEquals("Failed to parse value [" + value + "] for setting [index.number_of_replicas] must be >= 0", e.getMessage());
}

try {
prepareCreate("test").setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1))
.build())
.get();
fail("should have thrown an exception about the shard count");
} catch (IllegalArgumentException e) {
assertThat("message contains error about shard count: " + e.getMessage(),
e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true));
assertThat("message contains error about shard count: " + e.getMessage(),
e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true));
}
}

public void testCreateIndexWithBlocks() {
@@ -164,39 +144,38 @@ public class CreateIndexIT extends ESIntegTestCase {
disableIndexBlock("test", IndexMetaData.SETTING_BLOCKS_METADATA);
}

public void testUnknownSettingFails() {
try {
prepareCreate("test").setSettings(Settings.builder()
.put("index.unknown.value", "this must fail")
.build())
.get();
fail("should have thrown an exception about the shard count");
} catch (IllegalArgumentException e) {
assertEquals("unknown setting [index.unknown.value]", e.getMessage());
}
}

public void testInvalidShardCountSettingsWithoutPrefix() throws Exception {
int value = randomIntBetween(-10, 0);
try {
prepareCreate("test").setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, 0))
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), value)
.build())
.get();
fail("should have thrown an exception about the shard count");
} catch (IllegalArgumentException e) {
assertThat("message contains error about shard count: " + e.getMessage(),
e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true));
assertEquals("Failed to parse value [" + value + "] for setting [index.number_of_shards] must be >= 1", e.getMessage());
}
value = randomIntBetween(-10, -1);
try {
prepareCreate("test").setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), value)
.build())
.get();
fail("should have thrown an exception about the shard count");
} catch (IllegalArgumentException e) {
assertThat("message contains error about shard count: " + e.getMessage(),
e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true));
}
try {
prepareCreate("test").setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, 0))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1))
.build())
.get();
fail("should have thrown an exception about the shard count");
} catch (IllegalArgumentException e) {
assertThat("message contains error about shard count: " + e.getMessage(),
e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true));
assertThat("message contains error about shard count: " + e.getMessage(),
e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true));
assertEquals("Failed to parse value [" + value + "] for setting [index.number_of_replicas] must be >= 0", e.getMessage());
}
}

@@ -24,15 +24,23 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.junit.Before;

import java.util.Collection;
import java.util.List;

import static org.hamcrest.Matchers.is;

public class IndicesSegmentsRequestTests extends ESSingleNodeTestCase {

@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return pluginList(InternalSettingsPlugin.class);
}

@Before
public void setupIndex() {
Settings settings = Settings.builder()

@@ -35,10 +35,15 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.MockIndexEventListener;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.store.MockFSIndexStore;
import org.elasticsearch.test.transport.MockTransportService;

import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -57,6 +62,12 @@ import static org.hamcrest.Matchers.nullValue;

@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
public class IndicesShardStoreRequestIT extends ESIntegTestCase {

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return pluginList(MockFSIndexStore.TestPlugin.class);
}

public void testEmpty() {
ensureGreen();
IndicesShardStoresResponse rsp = client().admin().indices().prepareShardStores().get();
@@ -148,7 +159,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
internalCluster().ensureAtLeastNumDataNodes(2);
assertAcked(prepareCreate(index).setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "5")
.put(MockFSIndexStore.CHECK_INDEX_ON_CLOSE, false)
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
));
indexRandomData(index);
ensureGreen(index);

@@ -93,7 +93,7 @@ public class MetaDataIndexTemplateServiceTests extends ESTestCase {
null,
new HashSet<>(),
null,
null);
null, null);
MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService, new AliasValidator(Settings.EMPTY));

final List<Throwable> throwables = new ArrayList<>();

@@ -443,7 +443,7 @@ public class IndexAliasesIT extends ESIntegTestCase {

public void testWaitForAliasCreationSingleShard() throws Exception {
logger.info("--> creating index [test]");
assertAcked(admin().indices().create(createIndexRequest("test").settings(settingsBuilder().put("index.numberOfReplicas", 0).put("index.numberOfShards", 1))).get());
assertAcked(admin().indices().create(createIndexRequest("test").settings(settingsBuilder().put("index.number_of_replicas", 0).put("index.number_of_shards", 1))).get());

ensureGreen();

@@ -51,12 +51,14 @@ import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.mapper.string.StringFieldMapperPositionIncrementGapTests;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
@@ -73,6 +75,7 @@ import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
@@ -91,6 +94,12 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
// TODO: test for proper exception on unsupported indexes (maybe via separate test?)
// We have a 0.20.6.zip etc for this.

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return pluginList(InternalSettingsPlugin.class);
}

List<String> indexes;
List<String> unsupportedIndexes;
static Path singleDataPath;

@@ -139,8 +139,8 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
public void testClusterInfoServiceCollectsInformation() throws Exception {
internalCluster().startNodesAsync(2).get();
assertAcked(prepareCreate("test").setSettings(settingsBuilder()
.put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0)
.put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE).build()));
.put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0)
.put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE).build()));
ensureGreen("test");
InternalTestCluster internalTestCluster = internalCluster();
// Get the cluster info service on the master node

@@ -31,15 +31,13 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllo
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.cluster.settings.Validator;
import org.elasticsearch.common.inject.ModuleTestCase;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.index.settings.IndexDynamicSettings;

public class ClusterModuleTests extends ModuleTestCase {

@@ -93,18 +91,19 @@ public class ClusterModuleTests extends ModuleTestCase {
}

public void testRegisterIndexDynamicSettingDuplicate() {
ClusterModule module = new ClusterModule(Settings.EMPTY);
SettingsModule module = new SettingsModule(Settings.EMPTY, new SettingsFilter(Settings.EMPTY));
try {
module.registerIndexDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY);
module.registerSetting(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING);
} catch (IllegalArgumentException e) {
assertEquals(e.getMessage(), "Cannot register setting [" + EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE + "] twice");
assertEquals(e.getMessage(), "Cannot register setting [" + EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey() + "] twice");
}
}

public void testRegisterIndexDynamicSetting() {
ClusterModule module = new ClusterModule(Settings.EMPTY);
module.registerIndexDynamicSetting("foo.bar", Validator.EMPTY);
assertInstanceBindingWithAnnotation(module, DynamicSettings.class, dynamicSettings -> dynamicSettings.hasDynamicSetting("foo.bar"), IndexDynamicSettings.class);
final SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY);
SettingsModule module = new SettingsModule(Settings.EMPTY, settingsFilter);
module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.INDEX));
assertInstanceBinding(module, IndexScopedSettings.class, service -> service.hasDynamicSetting("foo.bar"));
}

public void testRegisterAllocationDeciderDuplicate() {
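The rewritten tests register settings through SettingsModule, which is also the extension point for contributing custom settings; registering the same key twice fails with the IllegalArgumentException asserted above. A hedged sketch of contributing a hypothetical index-scoped setting:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.SettingsModule;

    public class ExampleSettingRegistration {
        // hypothetical dynamic, index-scoped setting a plugin might contribute
        public static final Setting<Boolean> EXAMPLE_SETTING =
            Setting.boolSetting("index.example.enabled", false, true, Setting.Scope.INDEX);

        public static void register(SettingsModule module) {
            module.registerSetting(EXAMPLE_SETTING); // throws IllegalArgumentException if registered twice
        }
    }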
@ -0,0 +1,75 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
public class AutoExpandReplicasTests extends ESTestCase {
|
||||
|
||||
public void testParseSettings() {
|
||||
AutoExpandReplicas autoExpandReplicas = AutoExpandReplicas.SETTING.get(Settings.builder().put("index.auto_expand_replicas", "0-5").build());
|
||||
assertEquals(0, autoExpandReplicas.getMinReplicas());
|
||||
assertEquals(5, autoExpandReplicas.getMaxReplicas(8));
|
||||
assertEquals(2, autoExpandReplicas.getMaxReplicas(3));
|
||||
|
||||
autoExpandReplicas = AutoExpandReplicas.SETTING.get(Settings.builder().put("index.auto_expand_replicas", "0-all").build());
|
||||
assertEquals(0, autoExpandReplicas.getMinReplicas());
|
||||
assertEquals(5, autoExpandReplicas.getMaxReplicas(6));
|
||||
assertEquals(2, autoExpandReplicas.getMaxReplicas(3));
|
||||
|
||||
autoExpandReplicas = AutoExpandReplicas.SETTING.get(Settings.builder().put("index.auto_expand_replicas", "1-all").build());
|
||||
assertEquals(1, autoExpandReplicas.getMinReplicas());
|
||||
assertEquals(5, autoExpandReplicas.getMaxReplicas(6));
|
||||
assertEquals(2, autoExpandReplicas.getMaxReplicas(3));
|
||||
|
||||
}
|
||||
|
||||
public void testInvalidValues() {
|
||||
try {
|
||||
AutoExpandReplicas.SETTING.get(Settings.builder().put("index.auto_expand_replicas", "boom").build());
|
||||
fail();
|
||||
} catch (IllegalArgumentException ex) {
|
||||
assertEquals("failed to parse [index.auto_expand_replicas] from value: [boom] at index -1", ex.getMessage());
|
||||
}
|
||||
|
||||
try {
|
||||
AutoExpandReplicas.SETTING.get(Settings.builder().put("index.auto_expand_replicas", "1-boom").build());
|
||||
fail();
|
||||
} catch (IllegalArgumentException ex) {
|
||||
assertEquals("failed to parse [index.auto_expand_replicas] from value: [1-boom] at index 1", ex.getMessage());
|
||||
assertEquals("For input string: \"boom\"", ex.getCause().getMessage());
|
||||
}
|
||||
|
||||
try {
|
||||
AutoExpandReplicas.SETTING.get(Settings.builder().put("index.auto_expand_replicas", "boom-1").build());
|
||||
fail();
|
||||
} catch (IllegalArgumentException ex) {
|
||||
assertEquals("failed to parse [index.auto_expand_replicas] from value: [boom-1] at index 4", ex.getMessage());
|
||||
assertEquals("For input string: \"boom\"", ex.getCause().getMessage());
|
||||
}
|
||||
|
||||
try {
|
||||
AutoExpandReplicas.SETTING.get(Settings.builder().put("index.auto_expand_replicas", "2-1").build());
|
||||
} catch (IllegalArgumentException ex) {
|
||||
assertEquals("[index.auto_expand_replicas] minReplicas must be =< maxReplicas but wasn't 2 > 1", ex.getMessage());
|
||||
}
|
||||
|
||||
}
|
||||
}
|
|
@@ -47,7 +47,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
        prepareCreate("test").setSettings(Settings.builder()
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, 0)).get();
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0)).get();
        ensureGreen("test");
        indexRandomData();
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard()));

@@ -66,7 +66,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
        prepareCreate("test").setSettings(Settings.builder()
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueHours(1))).get();
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1))).get();
        ensureGreen("test");
        indexRandomData();
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard()));

@@ -90,14 +90,14 @@ public class DelayedAllocationIT extends ESIntegTestCase {
        prepareCreate("test").setSettings(Settings.builder()
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueMillis(100))).get();
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get();
        ensureGreen("test");
        indexRandomData();
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard()));
        ensureGreen("test");
        internalCluster().startNode();
        // do a second round with longer delay to make sure it happens
        assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueMillis(100))).get());
        assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get());
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard()));
        ensureGreen("test");
    }

@@ -112,7 +112,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
        prepareCreate("test").setSettings(Settings.builder()
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueHours(1))).get();
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1))).get();
        ensureGreen("test");
        indexRandomData();
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard()));

@@ -123,7 +123,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
            }
        });
        assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1));
        assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueMillis(100))).get());
        assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get());
        ensureGreen("test");
        assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0));
    }

@@ -138,7 +138,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
        prepareCreate("test").setSettings(Settings.builder()
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueHours(1))).get();
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1))).get();
        ensureGreen("test");
        indexRandomData();
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard()));

@@ -149,7 +149,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
            }
        });
        assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1));
        assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueMillis(0))).get());
        assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(0))).get());
        ensureGreen("test");
        assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0));
    }
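Note: every hunk in this file applies the same mechanical migration: the constant is now a typed Setting rather than a raw String key, so call sites pass Setting#getKey() to Settings.Builder. A condensed before/after sketch (assuming, as the TimeValue arguments above suggest, a Setting<TimeValue>):

    // before: .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, "100ms")  // constant was the String key
    // after: the key is extracted explicitly, and the value can be read back typed and validated
    Settings settings = Settings.builder()
            .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))
            .build();
    TimeValue delay = UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(settings);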
@@ -69,7 +69,7 @@ public class RoutingServiceTests extends ESAllocationTestCase {
    public void testNoDelayedUnassigned() throws Exception {
        AllocationService allocation = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator());
        MetaData metaData = MetaData.builder()
                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, "0"))
                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0"))
                        .numberOfShards(1).numberOfReplicas(1))
                .build();
        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)

@@ -97,7 +97,7 @@ public class RoutingServiceTests extends ESAllocationTestCase {
    public void testDelayedUnassignedScheduleReroute() throws Exception {
        MockAllocationService allocation = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator());
        MetaData metaData = MetaData.builder()
                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, "100ms"))
                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms"))
                        .numberOfShards(1).numberOfReplicas(1))
                .build();
        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)

@@ -144,9 +144,9 @@ public class RoutingServiceTests extends ESAllocationTestCase {
        try {
            MockAllocationService allocation = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator());
            MetaData metaData = MetaData.builder()
                    .put(IndexMetaData.builder("short_delay").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, "100ms"))
                    .put(IndexMetaData.builder("short_delay").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms"))
                            .numberOfShards(1).numberOfReplicas(1))
                    .put(IndexMetaData.builder("long_delay").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, "10s"))
                    .put(IndexMetaData.builder("long_delay").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "10s"))
                            .numberOfShards(1).numberOfReplicas(1))
                    .build();
            ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData)
@@ -259,7 +259,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
    public void testUnassignedDelayedOnlyOnNodeLeft() throws Exception {
        final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, null);
        long delay = unassignedInfo.updateDelay(unassignedInfo.getUnassignedTimeInNanos() + 1, // add 1 tick delay
                Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, "10h").build(), Settings.EMPTY);
                Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "10h").build(), Settings.EMPTY);
        long cachedDelay = unassignedInfo.getLastComputedLeftDelayNanos();
        assertThat(delay, equalTo(cachedDelay));
        assertThat(delay, equalTo(TimeValue.timeValueHours(10).nanos() - 1));

@@ -273,7 +273,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
        reasons.remove(UnassignedInfo.Reason.NODE_LEFT);
        UnassignedInfo unassignedInfo = new UnassignedInfo(RandomPicks.randomFrom(getRandom(), reasons), null);
        long delay = unassignedInfo.updateDelay(unassignedInfo.getUnassignedTimeInNanos() + 1, // add 1 tick delay
                Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, "10h").build(), Settings.EMPTY);
                Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "10h").build(), Settings.EMPTY);
        assertThat(delay, equalTo(0l));
        delay = unassignedInfo.getLastComputedLeftDelayNanos();
        assertThat(delay, equalTo(0l));

@@ -286,7 +286,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
        final long baseTime = System.nanoTime();
        final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "test", null, baseTime, System.currentTimeMillis());
        final long totalDelayNanos = TimeValue.timeValueMillis(10).nanos();
        final Settings settings = Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueNanos(totalDelayNanos)).build();
        final Settings settings = Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueNanos(totalDelayNanos)).build();
        long delay = unassignedInfo.updateDelay(baseTime, settings, Settings.EMPTY);
        assertThat(delay, equalTo(totalDelayNanos));
        assertThat(delay, equalTo(unassignedInfo.getLastComputedLeftDelayNanos()));

@@ -336,8 +336,8 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
        final long expectMinDelaySettingsNanos = Math.min(delayTest1.nanos(), delayTest2.nanos());

        MetaData metaData = MetaData.builder()
                .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, delayTest1)).numberOfShards(1).numberOfReplicas(1))
                .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, delayTest2)).numberOfShards(1).numberOfReplicas(1))
                .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest1)).numberOfShards(1).numberOfReplicas(1))
                .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest2)).numberOfShards(1).numberOfReplicas(1))
                .build();
        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
                .metaData(metaData)
@@ -753,7 +753,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {

        MetaData metaData = MetaData.builder()
                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
                .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT).put(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "_id", "node1,node2")).numberOfShards(2).numberOfReplicas(0))
                .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT).put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_id", "node1,node2")).numberOfShards(2).numberOfReplicas(0))
                .build();

        // we use a second index here (test1) that never gets assigned otherwise allocateUnassinged is never called if we don't have unassigned shards.
@@ -54,7 +54,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
                        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4)
                        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                        .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 2)))
                        .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 2)))
                .build();

        RoutingTable routingTable = RoutingTable.builder()

@@ -212,12 +212,12 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
            assertThat(shardRouting.index(), equalTo("test1"));
        }

        logger.info("update " + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE + " for test, see that things move");
        logger.info("update " + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey() + " for test, see that things move");
        metaData = MetaData.builder(metaData)
                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
                        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
                        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                        .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 3)
                        .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 3)
                ))
                .build();

@@ -49,14 +49,14 @@ public class EnableAllocationDeciderIT extends ESIntegTestCase {

        final String secondNode = internalCluster().startNode();
        // prevent via index setting but only on index test
        client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get();
        client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)).get();
        client().admin().cluster().prepareReroute().get();
        ensureGreen();
        assertAllShardsOnNodes("test", firstNode);
        assertAllShardsOnNodes("test_1", firstNode);

        // now enable the index test to relocate since index settings override cluster settings
        client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get();
        client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get();
        logger.info("--> balance index [test]");
        client().admin().cluster().prepareReroute().get();
        ensureGreen("test");
@@ -44,7 +44,7 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING;
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING;
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE;
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.Matchers.equalTo;

@@ -121,7 +121,7 @@ public class EnableAllocationTests extends ESAllocationTestCase {

        MetaData metaData = MetaData.builder()
                .put(IndexMetaData.builder("disabled").settings(settings(Version.CURRENT)
                        .put(INDEX_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()))
                        .put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()))
                        .numberOfShards(1).numberOfReplicas(1))
                .put(IndexMetaData.builder("enabled").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
                .build();

@@ -163,12 +163,12 @@ public class EnableAllocationTests extends ESAllocationTestCase {
                .build();
        ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        AllocationService strategy = createAllocationService(build, clusterSettings, getRandom());
        Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build();
        Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE).build();

        logger.info("Building initial routing table");
        MetaData metaData = MetaData.builder()
                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(indexSettings)).numberOfShards(3).numberOfReplicas(1))
                .put(IndexMetaData.builder("always_disabled").settings(settings(Version.CURRENT).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE)).numberOfShards(1).numberOfReplicas(1))
                .put(IndexMetaData.builder("always_disabled").settings(settings(Version.CURRENT).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE)).numberOfShards(1).numberOfReplicas(1))
                .build();

        RoutingTable routingTable = RoutingTable.builder()

@@ -219,7 +219,7 @@ public class EnableAllocationTests extends ESAllocationTestCase {
            IndexMetaData meta = clusterState.getMetaData().index("test");
            IndexMetaData meta1 = clusterState.getMetaData().index("always_disabled");
            clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).removeAllIndices().put(IndexMetaData.builder(meta1))
                    .put(IndexMetaData.builder(meta).settings(settingsBuilder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, allowedOnes).build())))
                    .put(IndexMetaData.builder(meta).settings(settingsBuilder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes).build())))
                    .build();

        }

@@ -265,7 +265,7 @@ public class EnableAllocationTests extends ESAllocationTestCase {
                .build();
        ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        AllocationService strategy = createAllocationService(build, clusterSettings, getRandom());
        Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build();
        Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE).build();

        logger.info("Building initial routing table");
        MetaData metaData = MetaData.builder()

@@ -312,7 +312,7 @@ public class EnableAllocationTests extends ESAllocationTestCase {
            prevState = clusterState;
            IndexMetaData meta = clusterState.getMetaData().index("test");
            clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).removeAllIndices()
                    .put(IndexMetaData.builder(meta).settings(settingsBuilder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL).build()))).build();
                    .put(IndexMetaData.builder(meta).settings(settingsBuilder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL).build()))).build();
        }
        clusterSettings.applySettings(clusterState.metaData().settings());
        routingTable = strategy.reroute(clusterState, "reroute").routingTable();
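Note: these tests toggle the same decider through both levels; index-level values override the cluster-wide ones, and dynamic cluster updates flow through the ClusterSettings registry. A rough sketch of that update path, using only calls that appear in the hunks above:

    // Build a ClusterSettings registry over the node settings plus the built-in
    // cluster-scope settings, then push an updated Settings snapshot through it.
    Settings nodeSettings = Settings.builder()
            .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
            .build();
    ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    clusterSettings.applySettings(nodeSettings); // re-reads dynamic settings and notifies registered consumers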
@@ -265,7 +265,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
                    .get();
            fail("bogus value");
        } catch (IllegalArgumentException ex) {
            assertEquals(ex.getMessage(), "Failed to parse setting [discovery.zen.commit_timeout] with value [whatever] as a time value: unit is missing or unrecognized");
            assertEquals(ex.getMessage(), "Failed to parse setting [discovery.zen.publish_timeout] with value [whatever] as a time value: unit is missing or unrecognized");
        }

        assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l));
@@ -23,8 +23,10 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;

@@ -62,6 +64,10 @@ public class SettingsFilteringIT extends ESIntegTestCase {
            return "Settings Filtering Plugin";
        }

        public void onModule(SettingsModule module) {
            module.registerSetting(Setting.groupSetting("index.filter_test.", false, Setting.Scope.INDEX));
        }

        @Override
        public Collection<Module> nodeModules() {
            return Collections.<Module>singletonList(new SettingsFilteringModule());
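Note: this is the up-front registration the new infrastructure demands; an index.* key that no registered Setting covers is rejected at validation time. A minimal plugin sketch (the class name is hypothetical, and the Plugin boilerplate such as name()/description() is omitted; the registration calls mirror the hunk above):

    public class FilterTestPlugin extends Plugin {
        public void onModule(SettingsModule module) {
            // register "index.filter_test.*" as a non-dynamic, index-scoped group setting
            module.registerSetting(Setting.groupSetting("index.filter_test.", false, Setting.Scope.INDEX));
        }
    }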
@@ -1,106 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.settings;

import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

public class SettingsValidatorTests extends ESTestCase {
    public void testValidators() throws Exception {
        assertThat(Validator.EMPTY.validate("", "anything goes", null), nullValue());

        assertThat(Validator.TIME.validate("", "10m", null), nullValue());
        assertThat(Validator.TIME.validate("", "10g", null), notNullValue());
        assertThat(Validator.TIME.validate("", "bad timing", null), notNullValue());

        assertThat(Validator.BYTES_SIZE.validate("", "10m", null), nullValue());
        assertThat(Validator.BYTES_SIZE.validate("", "10g", null), nullValue());
        assertThat(Validator.BYTES_SIZE.validate("", "bad", null), notNullValue());

        assertThat(Validator.FLOAT.validate("", "10.2", null), nullValue());
        assertThat(Validator.FLOAT.validate("", "10.2.3", null), notNullValue());

        assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "10.2", null), nullValue());
        assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "0.0", null), nullValue());
        assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "-1.0", null), notNullValue());
        assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "10.2.3", null), notNullValue());

        assertThat(Validator.DOUBLE.validate("", "10.2", null), nullValue());
        assertThat(Validator.DOUBLE.validate("", "10.2.3", null), notNullValue());

        assertThat(Validator.DOUBLE_GTE_2.validate("", "10.2", null), nullValue());
        assertThat(Validator.DOUBLE_GTE_2.validate("", "2.0", null), nullValue());
        assertThat(Validator.DOUBLE_GTE_2.validate("", "1.0", null), notNullValue());
        assertThat(Validator.DOUBLE_GTE_2.validate("", "10.2.3", null), notNullValue());

        assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "10.2", null), nullValue());
        assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "0.0", null), nullValue());
        assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "-1.0", null), notNullValue());
        assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "10.2.3", null), notNullValue());

        assertThat(Validator.INTEGER.validate("", "10", null), nullValue());
        assertThat(Validator.INTEGER.validate("", "10.2", null), notNullValue());

        assertThat(Validator.INTEGER_GTE_2.validate("", "2", null), nullValue());
        assertThat(Validator.INTEGER_GTE_2.validate("", "1", null), notNullValue());
        assertThat(Validator.INTEGER_GTE_2.validate("", "0", null), notNullValue());
        assertThat(Validator.INTEGER_GTE_2.validate("", "10.2.3", null), notNullValue());

        assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "2", null), nullValue());
        assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "1", null), nullValue());
        assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "0", null), nullValue());
        assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "-1", null), notNullValue());
        assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "10.2", null), notNullValue());

        assertThat(Validator.POSITIVE_INTEGER.validate("", "2", null), nullValue());
        assertThat(Validator.POSITIVE_INTEGER.validate("", "1", null), nullValue());
        assertThat(Validator.POSITIVE_INTEGER.validate("", "0", null), notNullValue());
        assertThat(Validator.POSITIVE_INTEGER.validate("", "-1", null), notNullValue());
        assertThat(Validator.POSITIVE_INTEGER.validate("", "10.2", null), notNullValue());

        assertThat(Validator.PERCENTAGE.validate("", "asdasd", null), notNullValue());
        assertThat(Validator.PERCENTAGE.validate("", "-1", null), notNullValue());
        assertThat(Validator.PERCENTAGE.validate("", "20", null), notNullValue());
        assertThat(Validator.PERCENTAGE.validate("", "-1%", null), notNullValue());
        assertThat(Validator.PERCENTAGE.validate("", "101%", null), notNullValue());
        assertThat(Validator.PERCENTAGE.validate("", "100%", null), nullValue());
        assertThat(Validator.PERCENTAGE.validate("", "99%", null), nullValue());
        assertThat(Validator.PERCENTAGE.validate("", "0%", null), nullValue());

        assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "asdasd", null), notNullValue());
        assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "20", null), notNullValue());
        assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "20mb", null), nullValue());
        assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "-1%", null), notNullValue());
        assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "101%", null), notNullValue());
        assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "100%", null), nullValue());
        assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "99%", null), nullValue());
        assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "0%", null), nullValue());
    }

    public void testDynamicValidators() throws Exception {
        DynamicSettings.Builder ds = new DynamicSettings.Builder();
        ds.addSetting("my.test.*", Validator.POSITIVE_INTEGER);
        String valid = ds.build().validateDynamicSetting("my.test.setting", "-1", null);
        assertThat(valid, equalTo("the value of the setting my.test.setting must be a positive integer"));
    }
}
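Note: this whole file goes away because the Validator/DynamicSettings machinery is superseded by validation on the Setting objects themselves: Setting#get both parses the raw string and rejects bad values with IllegalArgumentException, as the AutoExpandReplicas assertions earlier in this diff show. A hedged sketch of the replacement idiom ("my.test.flag" is a hypothetical key; boolSetting is used exactly as elsewhere in this diff):

    Setting<Boolean> flag = Setting.boolSetting("my.test.flag", false, true, Setting.Scope.INDEX);
    boolean value = flag.get(Settings.builder().put("my.test.flag", "true").build()); // parses and validates in one step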
@@ -28,16 +28,24 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;
import org.junit.Assert;

import java.io.IOException;
import java.util.Collection;

import static org.hamcrest.Matchers.containsString;

public class CodecTests extends ESSingleNodeTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> getPlugins() {
        return pluginList(InternalSettingsPlugin.class);
    }

    public void testAcceptPostingsFormat() throws IOException {
        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("properties").startObject("field").field("type", "string").field("postings_format", Codec.getDefault().postingsFormat().getName()).endObject().endObject()
@@ -34,6 +34,7 @@ import static org.hamcrest.Matchers.lessThan;
 * Basic Tests for {@link GeoDistance}
 */
public class GeoDistanceTests extends ESTestCase {

    public void testGeoDistanceSerialization() throws IOException {
        // make sure that ordinals don't change, because we rely on then in serialization
        assertThat(GeoDistance.PLANE.ordinal(), equalTo(0));
@@ -18,8 +18,11 @@
 */
package org.elasticsearch.common.settings;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.transport.TransportService;

@@ -165,4 +168,61 @@ public class ScopedSettingsTests extends ESTestCase {
        assertTrue(ref.get().contains("internal:index/shard/recovery/*"));
        assertTrue(ref.get().contains("internal:gateway/local*"));
    }

    public void testGetSetting() {
        IndexScopedSettings settings = new IndexScopedSettings(
            Settings.EMPTY,
            IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
        IndexScopedSettings copy = settings.copy(Settings.builder().put("index.store.type", "boom").build(), newIndexMeta("foo", Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 3).build()));
        assertEquals(3, copy.get(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING).intValue());
        assertEquals(1, copy.get(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING).intValue());
        assertEquals("boom", copy.get(IndexModule.INDEX_STORE_TYPE_SETTING)); // test fallback to node settings
    }

    public void testValidate() {
        IndexScopedSettings settings = new IndexScopedSettings(
            Settings.EMPTY,
            IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
        settings.validate(Settings.builder().put("index.store.type", "boom"));
        settings.validate(Settings.builder().put("index.store.type", "boom").build());
        try {
            settings.validate(Settings.builder().put("index.store.type", "boom", "i.am.not.a.setting", true));
            fail();
        } catch (IllegalArgumentException e) {
            assertEquals("unknown setting [i.am.not.a.setting]", e.getMessage());
        }

        try {
            settings.validate(Settings.builder().put("index.store.type", "boom", "i.am.not.a.setting", true).build());
            fail();
        } catch (IllegalArgumentException e) {
            assertEquals("unknown setting [i.am.not.a.setting]", e.getMessage());
        }

        try {
            settings.validate(Settings.builder().put("index.store.type", "boom", "index.number_of_replicas", true).build());
            fail();
        } catch (IllegalArgumentException e) {
            assertEquals("Failed to parse value [true] for setting [index.number_of_replicas]", e.getMessage());
        }

        try {
            settings.validate("index.number_of_replicas", Settings.builder().put("index.number_of_replicas", "true").build());
            fail();
        } catch (IllegalArgumentException e) {
            assertEquals("Failed to parse value [true] for setting [index.number_of_replicas]", e.getMessage());
        }
    }


    public static IndexMetaData newIndexMeta(String name, Settings indexSettings) {
        Settings build = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(indexSettings)
                .build();
        IndexMetaData metaData = IndexMetaData.builder(name).settings(build).build();
        return metaData;
    }

}
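Note: testValidate doubles as documentation for the guarantees in the commit message: unknown index settings and unparseable values are both rejected at the IndexScopedSettings boundary, on index creation as well as on settings updates. A compact usage sketch built from the same calls:

    IndexScopedSettings indexScopedSettings =
            new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
    // a registered setting with a parseable value passes
    indexScopedSettings.validate(Settings.builder().put("index.number_of_replicas", "2").build());
    // an unregistered key fails with "unknown setting [i.am.not.a.setting]":
    // indexScopedSettings.validate(Settings.builder().put("i.am.not.a.setting", true).build()); // throws IllegalArgumentException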
@@ -64,7 +64,7 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
        String node2 = nodeName2.get();

        String index = "index";
        assertAcked(prepareCreate(index).setSettings(Settings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "_name", node1)));
        assertAcked(prepareCreate(index).setSettings(Settings.builder().put("index.number_of_replicas", 0).put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node1)));
        index(index, "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
        ensureGreen();
        assertIndexInMetaState(node1, index);

@@ -72,7 +72,7 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
        assertIndexInMetaState(masterNode, index);

        logger.debug("relocating index...");
        client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "_name", node2)).get();
        client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder().put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node2)).get();
        client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).get();
        ensureGreen();
        assertIndexDirectoryDeleted(node1, index);
@@ -60,7 +60,7 @@ public class RecoveryBackwardsCompatibilityIT extends ESBackcompatTestCase {
    public void testReusePeerRecovery() throws Exception {
        assertAcked(prepareCreate("test").setSettings(Settings.builder().put(indexSettings())
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)));
                .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)));
        logger.info("--> indexing docs");
        int numDocs = scaledRandomIntBetween(100, 1000);
        IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
@@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;

@@ -39,6 +40,8 @@ import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.store.MockFSDirectoryService;
import org.elasticsearch.test.store.MockFSIndexStore;

import java.util.Collection;

import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;

@@ -54,6 +57,12 @@ import static org.hamcrest.Matchers.notNullValue;

@ClusterScope(numDataNodes = 0, scope = Scope.TEST)
public class RecoveryFromGatewayIT extends ESIntegTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return pluginList(MockFSIndexStore.TestPlugin.class);
    }

    public void testOneNodeRecoverFromGateway() throws Exception {

        internalCluster().startNode();

@@ -322,11 +331,11 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
    public void testReusePeerRecovery() throws Exception {
        final Settings settings = settingsBuilder()
                .put("action.admin.cluster.node.shutdown.delay", "10ms")
                .put(MockFSIndexStore.CHECK_INDEX_ON_CLOSE, false)
                .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING, false)
                .put("gateway.recover_after_nodes", 4)
                .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, 4)
                .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, 4)
                .put(MockFSDirectoryService.CRASH_INDEX, false).build();
                .put(MockFSDirectoryService.CRASH_INDEX_SETTING.getKey(), false).build();

        internalCluster().startNodesAsync(4, settings).get();
        // prevent any rebalance actions during the peer recovery

@@ -334,7 +343,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
        // we reuse the files on disk after full restarts for replicas.
        assertAcked(prepareCreate("test").setSettings(Settings.builder()
                .put(indexSettings())
                .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)));
                .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)));
        ensureGreen();
        logger.info("--> indexing docs");
        for (int i = 0; i < 1000; i++) {
@@ -228,7 +228,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {

    public void testDelayedAllocation() {
        RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(),
                Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT);
                Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT);
        testAllocator.addData(node1, true, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
        if (randomBoolean()) {
            // we sometime return empty list of files, make sure we test this as well

@@ -241,7 +241,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
        assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));

        allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(),
                Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT);
                Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT);
        testAllocator.addData(node2, false, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
        AllocationService.updateLeftDelayOfUnassignedShards(allocation, Settings.EMPTY);
        changed = testAllocator.allocateUnassigned(allocation);
@@ -65,7 +65,7 @@ public class ReusePeerRecoverySharedTest {
     * for replicas.
     */
    assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put(indexSettings)
            .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)));
            .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)));
    client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get();
    logger.info("--> indexing docs");
    for (int i = 0; i < 1000; i++) {
@@ -40,10 +40,13 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.junit.annotations.TestLogging;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;

@@ -62,6 +65,12 @@ import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.startsWith;

public class GetActionIT extends ESIntegTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return pluginList(InternalSettingsPlugin.class); // uses index.version.created
    }

    public void testSimpleGet() {
        assertAcked(prepareCreate("test")
                .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1))
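Note: the nodePlugins() override recurs across the tests in this diff because settings private to Elasticsearch, such as index.version.created, can now only be set by tests that install a plugin registering them. A sketch of what that unlocks (the index name and version choice are illustrative):

    // With InternalSettingsPlugin installed, a test may create an index as an older version;
    // without it, this private setting is rejected by validation.
    assertAcked(prepareCreate("test")
            .setSettings(Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0.id)));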
@@ -42,6 +42,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.env.Environment;

@@ -59,7 +60,6 @@ import org.elasticsearch.index.similarity.SimilarityProvider;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.breaker.CircuitBreakerService;

@@ -148,7 +148,7 @@ public class IndexModuleTests extends ESTestCase {

    public void testRegisterIndexStore() throws IOException {
        final Index index = new Index("foo");
        final Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).put("path.home", createTempDir().toString()).put(IndexModule.STORE_TYPE, "foo_store").build();
        final Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).put("path.home", createTempDir().toString()).put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "foo_store").build();
        IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings);
        IndexModule module = new IndexModule(indexSettings, null, new AnalysisRegistry(null, environment));
        module.addIndexStore("foo_store", FooStore::new);

@@ -174,13 +174,11 @@ public class IndexModuleTests extends ESTestCase {
        IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings);
        IndexModule module = new IndexModule(indexSettings, null, new AnalysisRegistry(null, environment));
        Consumer<Settings> listener = (s) -> {};
        module.addIndexSettingsListener(listener);
        module.addIndexEventListener(eventListener);
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry);
        IndexSettings x = indexService.getIndexSettings();
        assertEquals(x.getSettings().getAsMap(), indexSettings.getSettings().getAsMap());
        assertEquals(x.getIndex(), index);
        assertSame(x.getUpdateListeners().get(0), listener);
        indexService.getIndexEventListener().beforeIndexDeleted(null);
        assertTrue(atomicBoolean.get());
        indexService.close("simon says", false);

@@ -188,28 +186,22 @@ public class IndexModuleTests extends ESTestCase {


    public void testListener() throws IOException {
        IndexModule module = new IndexModule(indexSettings, null, new AnalysisRegistry(null, environment));
        Consumer<Settings> listener = (s) -> {
        };
        module.addIndexSettingsListener(listener);
        Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.INDEX);
        IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings(index, settings, booleanSetting), null, new AnalysisRegistry(null, environment));
        Setting<Boolean> booleanSetting2 = Setting.boolSetting("foo.bar.baz", false, true, Setting.Scope.INDEX);
        AtomicBoolean atomicBoolean = new AtomicBoolean(false);
        module.addSettingsUpdateConsumer(booleanSetting, atomicBoolean::set);

        try {
            module.addIndexSettingsListener(listener);
            fail("already added");
        } catch (IllegalStateException ex) {

        }

        try {
            module.addIndexSettingsListener(null);
            fail("must not be null");
            module.addSettingsUpdateConsumer(booleanSetting2, atomicBoolean::set);
            fail("not registered");
        } catch (IllegalArgumentException ex) {

        }

        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry);
        IndexSettings x = indexService.getIndexSettings();
        assertEquals(1, x.getUpdateListeners().size());
        assertSame(x.getUpdateListeners().get(0), listener);
        assertSame(booleanSetting, indexService.getIndexSettings().getScopedSettings().get(booleanSetting.getKey()));

        indexService.close("simon says", false);
    }

@@ -299,7 +291,7 @@ public class IndexModuleTests extends ESTestCase {

    public void testRegisterCustomQueryCache() throws IOException {
        Settings indexSettings = Settings.settingsBuilder()
                .put(IndexModule.QUERY_CACHE_TYPE, "custom")
                .put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), "custom")
                .put("path.home", createTempDir().toString())
                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
        IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings(new Index("foo"), indexSettings), null, new AnalysisRegistry(null, environment));

@@ -375,8 +367,6 @@ public class IndexModuleTests extends ESTestCase {
        }
    }


    public static final class FooStore extends IndexStore {

        public FooStore(IndexSettings indexSettings, IndexStoreConfig config) {
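Note: testListener above captures the API swap for reacting to settings changes: the removed Consumer<Settings>/addIndexSettingsListener pair gives way to per-setting update consumers, which are accepted only for settings the module's scoped settings actually contain. Condensed (assuming an IndexModule built over scoped settings that include the setting, as in the test):

    // Register an index-scoped, dynamic boolean setting and subscribe to its updates;
    // the consumer is rejected with IllegalArgumentException for unregistered settings.
    Setting<Boolean> fooBar = Setting.boolSetting("foo.bar", false, true, Setting.Scope.INDEX);
    AtomicBoolean flag = new AtomicBoolean(false);
    module.addSettingsUpdateConsumer(fooBar, flag::set); // invoked with the new value when "foo.bar" changes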
Some files were not shown because too many files have changed in this diff.