From fbbb04b87e9819925bac9bc6772c794ec87499d7 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 27 Nov 2015 10:25:59 +0100 Subject: [PATCH 001/167] Add infrastructure to transactionally apply and reset dynamic settings This commit adds the infrastructure to make settings that are updatable resettable and changes the application of updates to be transactional. This means setting updates are either applied or not. If the application fails, all values are rejected. This initial commit converts all dynamic cluster settings to make use of the new infrastructure. All cluster-level dynamic settings are now resettable to their defaults or to the node-level settings. The infrastructure also allows listing default values and descriptions, which is not fully implemented yet. Values can be reset using a list of keys or simple regular expressions. This has only been implemented on the Java layer so far. For instance, to reset all recovery settings to their defaults a user can just specify `indices.recovery.*`. This commit also adds strict settings validation: if a setting is unknown or if a setting cannot be applied, the entire settings update request will fail. 
--- .../ClusterUpdateSettingsRequest.java | 22 +- .../ClusterUpdateSettingsRequestBuilder.java | 22 + .../TransportClusterUpdateSettingsAction.java | 90 ++-- .../close/TransportCloseIndexAction.java | 28 +- .../delete/TransportDeleteIndexAction.java | 4 +- .../open/TransportOpenIndexAction.java | 6 +- .../action/support/DestructiveOperations.java | 26 +- .../client/transport/TransportClient.java | 4 - .../elasticsearch/cluster/ClusterModule.java | 107 ++--- .../cluster/InternalClusterInfoService.java | 54 +-- .../action/index/MappingUpdatedAction.java | 28 +- .../cluster/metadata/MetaData.java | 33 +- .../allocator/BalancedShardsAllocator.java | 59 +-- .../decider/AwarenessAllocationDecider.java | 57 +-- .../ClusterRebalanceAllocationDecider.java | 56 +-- .../ConcurrentRebalanceAllocationDecider.java | 27 +- .../decider/DiskThresholdDecider.java | 126 +++--- .../decider/EnableAllocationDecider.java | 53 +-- .../decider/FilterAllocationDecider.java | 62 +-- .../decider/ShardsLimitAllocationDecider.java | 30 +- .../SnapshotInProgressAllocationDecider.java | 29 +- .../decider/ThrottlingAllocationDecider.java | 49 +-- .../service/InternalClusterService.java | 37 +- .../settings/ClusterDynamicSettings.java | 38 -- .../common/settings/ClusterSettings.java | 170 ++++++++ .../settings/ClusterSettingsService.java | 53 +++ .../common/settings/Setting.java | 394 ++++++++++++++++++ .../common/settings/Settings.java | 9 + .../common/settings/SettingsService.java | 201 +++++++++ .../elasticsearch/common/unit/TimeValue.java | 24 ++ .../discovery/DiscoverySettings.java | 87 ++-- .../discovery/zen/ZenDiscovery.java | 43 +- .../zen/elect/ElectMasterService.java | 23 +- .../elasticsearch/gateway/GatewayService.java | 2 +- .../index/store/IndexStoreConfig.java | 39 +- .../elasticsearch/indices/IndicesService.java | 12 +- .../HierarchyCircuitBreakerService.java | 124 ++---- .../indices/recovery/RecoverySettings.java | 185 ++++---- .../indices/ttl/IndicesTTLService.java | 26 +- 
.../java/org/elasticsearch/node/Node.java | 9 +- .../org/elasticsearch/node/NodeModule.java | 8 +- .../node/settings/NodeSettingsService.java | 122 ------ .../settings/RestUpdateSettingsAction.java | 2 +- .../elasticsearch/search/SearchService.java | 23 +- .../snapshots/RestoreService.java | 33 +- .../elasticsearch/threadpool/ThreadPool.java | 77 +--- .../transport/TransportService.java | 44 +- .../admin/indices/upgrade/UpgradeIT.java | 8 +- .../master/IndexingMasterFailoverIT.java | 4 +- .../recovery/ReplicaRecoveryBenchmark.java | 2 +- .../BenchmarkNettyLargeMessages.java | 2 - .../OldIndexBackwardsCompatibilityIT.java | 2 +- .../bwcompat/RestoreBackwardsCompatIT.java | 2 +- .../cluster/ClusterInfoServiceIT.java | 11 +- .../cluster/ClusterModuleTests.java | 12 +- .../cluster/ClusterServiceIT.java | 4 +- .../cluster/MinimumMasterNodesIT.java | 24 +- .../elasticsearch/cluster/NoMasterNodeIT.java | 4 +- .../ack/AckClusterUpdateSettingsIT.java | 6 +- .../org/elasticsearch/cluster/ack/AckIT.java | 4 +- .../allocation/AwarenessAllocationIT.java | 6 +- .../cluster/allocation/ClusterRerouteIT.java | 16 +- .../allocation/AddIncrementallyTests.java | 6 +- .../allocation/AllocationCommandsTests.java | 8 +- .../allocation/AllocationPriorityTests.java | 4 +- .../allocation/AwarenessAllocationTests.java | 22 +- .../allocation/BalanceConfigurationTests.java | 53 ++- .../ClusterRebalanceRoutingTests.java | 14 +- .../allocation/DeadNodesAllocationTests.java | 6 +- .../allocation/FailedNodeRoutingTests.java | 2 +- .../allocation/FailedShardsRoutingTests.java | 12 +- .../routing/allocation/IndexBalanceTests.java | 6 +- .../NodeVersionAllocationDeciderTests.java | 6 +- .../allocation/RebalanceAfterActiveTests.java | 2 +- .../RoutingNodesIntegrityTests.java | 6 +- .../allocation/ShardVersioningTests.java | 2 +- .../ShardsLimitAllocationTests.java | 4 +- .../SingleShardNoReplicasRoutingTests.java | 4 +- .../TenShardsOneReplicaRoutingTests.java | 2 +- 
.../decider/DiskThresholdDeciderTests.java | 96 ++--- .../DiskThresholdDeciderUnitTests.java | 24 +- .../decider/EnableAllocationDeciderIT.java | 4 +- .../decider/EnableAllocationTests.java | 36 +- .../allocation/decider/MockDiskUsagesIT.java | 20 +- .../cluster/settings/ClusterSettingsIT.java | 146 ++++++- .../structure/RoutingIteratorTests.java | 4 +- .../breaker/MemoryCircuitBreakerTests.java | 7 +- .../common/settings/SettingTests.java | 282 +++++++++++++ .../common/settings/SettingsServiceTests.java | 125 ++++++ .../common/unit/TimeValueTests.java | 10 + .../common/util/BigArraysTests.java | 11 +- .../DiscoveryWithServiceDisruptionsIT.java | 12 +- .../zen/NodeJoinControllerTests.java | 5 +- .../discovery/zen/ZenDiscoveryIT.java | 2 +- .../PublishClusterStateActionTests.java | 17 +- .../gateway/GatewayMetaStateTests.java | 4 +- .../RecoveryBackwardsCompatibilityIT.java | 4 +- .../gateway/RecoveryFromGatewayIT.java | 4 +- .../index/TransportIndexFailuresIT.java | 2 +- .../index/store/CorruptedFileIT.java | 6 +- .../indices/IndicesLifecycleListenerIT.java | 4 +- .../memory/breaker/CircuitBreakerNoopIT.java | 4 +- .../breaker/CircuitBreakerServiceIT.java | 34 +- .../breaker/CircuitBreakerUnitTests.java | 5 +- .../indices/recovery/IndexRecoveryIT.java | 12 +- .../recovery/RecoverySourceHandlerTests.java | 5 +- .../state/CloseIndexDisableCloseAllIT.java | 4 +- .../indices/state/RareClusterStateIT.java | 4 +- .../store/IndicesStoreIntegrationIT.java | 10 +- .../DestructiveOperationsIntegrationIT.java | 8 +- .../recovery/RecoverySettingsTests.java | 24 +- .../elasticsearch/recovery/RelocationIT.java | 2 +- .../recovery/TruncatedRecoveryIT.java | 2 +- .../AbstractSnapshotIntegTestCase.java | 2 +- .../DedicatedClusterSnapshotRestoreIT.java | 28 +- .../ThreadPoolTypeSettingsValidatorTests.java | 73 ---- .../UpdateThreadPoolSettingsTests.java | 65 +-- .../AbstractSimpleTransportTestCase.java | 13 +- .../NettySizeHeaderFrameDecoderTests.java | 5 +- 
.../discovery/azure/AzureDiscovery.java | 6 +- .../discovery/ec2/Ec2Discovery.java | 6 +- .../discovery/gce/GceDiscovery.java | 6 +- .../MockInternalClusterInfoService.java | 12 +- .../search/MockSearchService.java | 10 +- .../test/ESAllocationTestCase.java | 19 +- .../test/ESBackcompatTestCase.java | 4 +- .../elasticsearch/test/ESIntegTestCase.java | 8 +- .../test/InternalTestCluster.java | 32 +- 128 files changed, 2527 insertions(+), 1724 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java create mode 100644 core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java create mode 100644 core/src/main/java/org/elasticsearch/common/settings/ClusterSettingsService.java create mode 100644 core/src/main/java/org/elasticsearch/common/settings/Setting.java create mode 100644 core/src/main/java/org/elasticsearch/common/settings/SettingsService.java delete mode 100644 core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java create mode 100644 core/src/test/java/org/elasticsearch/common/settings/SettingTests.java create mode 100644 core/src/test/java/org/elasticsearch/common/settings/SettingsServiceTests.java delete mode 100644 core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTypeSettingsValidatorTests.java diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 0090d7db057..96e90e1afc3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; -import java.util.Map; +import java.util.*; 
import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; @@ -44,6 +44,8 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest transientReset = new HashSet<>(); + private Set persistentReset = new HashSet<>(); public ClusterUpdateSettingsRequest() { } @@ -51,7 +53,7 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest getTransientReset() { return Collections.unmodifiableSet(transientReset); } + + public Set getPersistentReset() { return Collections.unmodifiableSet(persistentReset); } + + public void addTransientResetKeys(Collection keys) { + transientReset.addAll(keys); + } + + public void addPersistentResetKeys(Collection keys) { + persistentReset.addAll(keys); + } + /** * Sets the transient settings to be updated. They will not survive a full cluster restart */ @@ -148,6 +162,8 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest(Arrays.asList(in.readStringArray())); + persistentReset = new HashSet<>(Arrays.asList(in.readStringArray())); readTimeout(in); } @@ -156,6 +172,8 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest keys) { + request.addTransientResetKeys(keys); + return this; + } + + public ClusterUpdateSettingsRequestBuilder addPersistentResetKeys(Collection keys) { + request.addPersistentResetKeys(keys); + return this; + } + + public ClusterUpdateSettingsRequestBuilder addTransientResetKeys(String... keys) { + request.addTransientResetKeys(Arrays.asList(keys)); + return this; + } + + public ClusterUpdateSettingsRequestBuilder addPersistentResetKeys(String... 
keys) { + request.addPersistentResetKeys(Arrays.asList(keys)); + return this; + } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 73d14a2bb11..90cc68abe91 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -34,16 +34,19 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; -import org.elasticsearch.cluster.settings.DynamicSettings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import static org.elasticsearch.cluster.ClusterState.builder; @@ -54,15 +57,17 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct private final AllocationService allocationService; - private final DynamicSettings dynamicSettings; + private final ClusterSettings dynamicSettings; + private final ClusterSettingsService clusterSettingsService; @Inject public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, 
ClusterService clusterService, ThreadPool threadPool, - AllocationService allocationService, @ClusterDynamicSettings DynamicSettings dynamicSettings, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + AllocationService allocationService, ClusterSettings dynamicSettings, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettingsService clusterSettingsService) { super(settings, ClusterUpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterUpdateSettingsRequest::new); this.allocationService = allocationService; this.dynamicSettings = dynamicSettings; + this.clusterSettingsService = clusterSettingsService; } @Override @@ -73,8 +78,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override protected ClusterBlockException checkBlock(ClusterUpdateSettingsRequest request, ClusterState state) { // allow for dedicated changes to the metadata blocks, so we don't block those to allow to "re-enable" it - if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && request.persistentSettings().get(MetaData.SETTING_READ_ONLY) != null) || - request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && request.transientSettings().get(MetaData.SETTING_READ_ONLY) != null) { + if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())) || + request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings())) { return null; } return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); @@ -184,37 +189,58 @@ public class 
TransportClusterUpdateSettingsAction extends TransportMasterNodeAct Settings.Builder transientSettings = Settings.settingsBuilder(); transientSettings.put(currentState.metaData().transientSettings()); for (Map.Entry entry : request.transientSettings().getAsMap().entrySet()) { - if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) { - String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state()); - if (error == null) { - transientSettings.put(entry.getKey(), entry.getValue()); - transientUpdates.put(entry.getKey(), entry.getValue()); - changed = true; - } else { - logger.warn("ignoring transient setting [{}], [{}]", entry.getKey(), error); - } + if (dynamicSettings.isLoggerSetting(entry.getKey()) || dynamicSettings.hasDynamicSetting(entry.getKey())) { + transientSettings.put(entry.getKey(), entry.getValue()); + transientUpdates.put(entry.getKey(), entry.getValue()); + changed = true; } else { - logger.warn("ignoring transient setting [{}], not dynamically updateable", entry.getKey()); + throw new IllegalArgumentException("transient setting [" + entry.getKey() + "], not dynamically updateable"); } } Settings.Builder persistentSettings = Settings.settingsBuilder(); persistentSettings.put(currentState.metaData().persistentSettings()); for (Map.Entry entry : request.persistentSettings().getAsMap().entrySet()) { - if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) { - String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state()); - if (error == null) { - persistentSettings.put(entry.getKey(), entry.getValue()); - persistentUpdates.put(entry.getKey(), entry.getValue()); - changed = true; - } else { - logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error); - } + if (dynamicSettings.isLoggerSetting(entry.getKey()) || dynamicSettings.hasDynamicSetting(entry.getKey())) { + persistentSettings.put(entry.getKey(), entry.getValue()); + 
persistentUpdates.put(entry.getKey(), entry.getValue()); + changed = true; } else { - logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey()); + throw new IllegalArgumentException("persistent setting [" + entry.getKey() + "], not dynamically updateable"); } } + for (String entry : request.getPersistentReset()) { + Set strings = persistentSettings.internalMap().keySet(); + Set keysToRemove = new HashSet(); + for (String key : strings) { + if (Regex.simpleMatch(entry, key)) { + keysToRemove.add(key); + } + } + for (String keyToRemove : keysToRemove) { + persistentSettings.remove(keyToRemove); + persistentUpdates.remove(keyToRemove); + } + changed |= keysToRemove.isEmpty() == false; + } + + for (String entry : request.getTransientReset()) { + Set strings = transientSettings.internalMap().keySet(); + Set keysToRemove = new HashSet<>(); + for (String key : strings) { + if (Regex.simpleMatch(entry, key)) { + keysToRemove.add(key); + } + } + for (String keyToRemove : keysToRemove) { + transientSettings.remove(keyToRemove); + transientUpdates.remove(keyToRemove); + } + changed |= keysToRemove.isEmpty() == false; + } + + if (!changed) { return currentState; } @@ -224,14 +250,18 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct .transientSettings(transientSettings.build()); ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - boolean updatedReadOnly = metaData.persistentSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || metaData.transientSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false); + boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings()) || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings()); if (updatedReadOnly) { blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); } else { blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); } - - return 
builder(currentState).metaData(metaData).blocks(blocks).build(); + ClusterState build = builder(currentState).metaData(metaData).blocks(blocks).build(); + Settings settings = build.metaData().settings(); + // now we try to apply things and if they are invalid we fail + // this dryRun will validate & parse settings but won't actually apply them. + clusterSettingsService.dryRun(settings); + return build; } }); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 2c25ee34f18..994fdcced1f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -31,31 +31,36 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; /** * Close index action */ -public class TransportCloseIndexAction extends TransportMasterNodeAction implements NodeSettingsService.Listener { +public class TransportCloseIndexAction extends TransportMasterNodeAction { private final MetaDataIndexStateService indexStateService; private final DestructiveOperations destructiveOperations; private volatile boolean closeIndexEnabled; - public static final String SETTING_CLUSTER_INDICES_CLOSE_ENABLE = "cluster.indices.close.enable"; + public static final Setting 
CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.Cluster); @Inject public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataIndexStateService indexStateService, - NodeSettingsService nodeSettingsService, ActionFilters actionFilters, + ClusterSettingsService clusterSettingsService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, CloseIndexRequest::new); this.indexStateService = indexStateService; this.destructiveOperations = destructiveOperations; - this.closeIndexEnabled = settings.getAsBoolean(SETTING_CLUSTER_INDICES_CLOSE_ENABLE, true); - nodeSettingsService.addListener(this); + this.closeIndexEnabled = CLUSTER_INDICES_CLOSE_ENABLE_SETTING.get(settings); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_INDICES_CLOSE_ENABLE_SETTING, this::setCloseIndexEnabled); + } + + private void setCloseIndexEnabled(boolean closeIndexEnabled) { + this.closeIndexEnabled = closeIndexEnabled; } @Override @@ -73,7 +78,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction listener) { destructiveOperations.failDestructive(request.indices()); if (closeIndexEnabled == false) { - throw new IllegalStateException("closing indices is disabled - set [" + SETTING_CLUSTER_INDICES_CLOSE_ENABLE + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace"); + throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() + ": true] to enable it. 
NOTE: closed indices still consume a significant amount of diskspace"); } super.doExecute(request, listener); } @@ -104,13 +109,4 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.Cluster); private volatile boolean destructiveRequiresName; @Inject - public DestructiveOperations(Settings settings, NodeSettingsService nodeSettingsService) { + public DestructiveOperations(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - destructiveRequiresName = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, false); - nodeSettingsService.addListener(this); + destructiveRequiresName = REQUIRES_NAME_SETTING.get(settings); + clusterSettingsService.addSettingsUpdateConsumer(REQUIRES_NAME_SETTING, this::setDestructiveRequiresName); + } + + private void setDestructiveRequiresName(boolean destructiveRequiresName) { + this.destructiveRequiresName = destructiveRequiresName; } /** @@ -65,15 +70,6 @@ public final class DestructiveOperations extends AbstractComponent implements No } } - @Override - public void onRefreshSettings(Settings settings) { - boolean newValue = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, destructiveRequiresName); - if (destructiveRequiresName != newValue) { - logger.info("updating [action.operate_all_indices] from [{}] to [{}]", destructiveRequiresName, newValue); - this.destructiveRequiresName = newValue; - } - } - private static boolean hasWildcardUsage(String aliasOrIndex) { return "_all".equals(aliasOrIndex) || aliasOrIndex.indexOf('*') != -1; } diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 33cf3479419..eb0639917c1 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ 
b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -32,7 +32,6 @@ import org.elasticsearch.client.support.Headers; import org.elasticsearch.client.transport.support.TransportProxyClient; import org.elasticsearch.cluster.ClusterNameModule; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.Module; @@ -43,12 +42,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.EnvironmentModule; import org.elasticsearch.indices.breaker.CircuitBreakerModule; import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsModule; import org.elasticsearch.plugins.PluginsService; diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index b2e793ba0ab..3739b4a3dc8 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -19,9 +19,6 @@ package org.elasticsearch.cluster; -import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; -import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction; import 
org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; @@ -29,7 +26,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateFilter; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; @@ -60,17 +56,16 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.InternalClusterService; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.ClusterSettingsService; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; -import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.engine.EngineConfig; @@ -81,21 +76,13 @@ import org.elasticsearch.index.shard.IndexShard; import 
org.elasticsearch.index.shard.MergePolicyConfig; import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.IndicesWarmer; -import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.cache.request.IndicesRequestCache; -import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.ttl.IndicesTTLService; -import org.elasticsearch.search.SearchService; import org.elasticsearch.search.internal.DefaultSearchContext; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; +import java.util.*; /** * Configures classes and services that affect the entire cluster. @@ -122,7 +109,7 @@ public class ClusterModule extends AbstractModule { SnapshotInProgressAllocationDecider.class)); private final Settings settings; - private final DynamicSettings.Builder clusterDynamicSettings = new DynamicSettings.Builder(); + private final Map> clusterDynamicSettings = new HashMap<>(); private final DynamicSettings.Builder indexDynamicSettings = new DynamicSettings.Builder(); private final ExtensionPoint.SelectedType shardsAllocators = new ExtensionPoint.SelectedType<>("shards_allocator", ShardsAllocator.class); private final ExtensionPoint.ClassSet allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class); @@ -145,73 +132,15 @@ public class ClusterModule extends AbstractModule { } private void registerBuiltinClusterSettings() { - registerClusterDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, Validator.EMPTY); - 
registerClusterDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, Validator.FLOAT); - registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, Validator.FLOAT); - registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_THRESHOLD, Validator.NON_NEGATIVE_FLOAT); - registerClusterDynamicSetting(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ALLOCATION_ALLOW_REBALANCE_VALIDATOR); - registerClusterDynamicSetting(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, Validator.INTEGER); - registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY); - registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, Validator.EMPTY); - registerClusterDynamicSetting(ZenDiscovery.SETTING_REJOIN_ON_MASTER_GONE, Validator.BOOLEAN); - registerClusterDynamicSetting(DiscoverySettings.NO_MASTER_BLOCK, Validator.EMPTY); - registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE, Validator.EMPTY); - registerClusterDynamicSetting(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); - registerClusterDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME); - registerClusterDynamicSetting(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, Validator.TIME); - registerClusterDynamicSetting(MetaData.SETTING_READ_ONLY, Validator.EMPTY); - 
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, Validator.POSITIVE_BYTES_SIZE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS, Validator.INTEGER); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, Validator.BYTES_SIZE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_COMPRESS, Validator.EMPTY); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, Validator.POSITIVE_INTEGER); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, Validator.POSITIVE_INTEGER); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*", ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR); - registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER); - registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, Validator.EMPTY); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, Validator.EMPTY); - 
registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, Validator.BOOLEAN); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, Validator.BOOLEAN); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, Validator.EMPTY); - registerClusterDynamicSetting(DestructiveOperations.REQUIRES_NAME, Validator.EMPTY); - registerClusterDynamicSetting(DiscoverySettings.PUBLISH_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(DiscoverySettings.PUBLISH_DIFF_ENABLE, Validator.BOOLEAN); - registerClusterDynamicSetting(DiscoverySettings.COMMIT_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE); - registerClusterDynamicSetting(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(SearchService.DEFAULT_SEARCH_TIMEOUT, 
Validator.TIMEOUT); - registerClusterDynamicSetting(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_VALIDATOR); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_INCLUDE, Validator.EMPTY); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_INCLUDE + ".*", Validator.EMPTY); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE, Validator.EMPTY); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE + ".*", Validator.EMPTY); - registerClusterDynamicSetting(TransportCloseIndexAction.SETTING_CLUSTER_INDICES_CLOSE_ENABLE, Validator.BOOLEAN); - registerClusterDynamicSetting(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, Validator.INTEGER); - registerClusterDynamicSetting(TransportReplicationAction.SHARD_FAILURE_TIMEOUT, Validator.TIME_NON_NEGATIVE); + for (Setting setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) { + registerSetting(setting); + } } private void registerBuiltinIndexSettings() { registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE, Validator.EMPTY); - registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.EMPTY); + registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.NON_NEGATIVE_INTEGER); registerIndexDynamicSetting(MergeSchedulerConfig.MAX_MERGE_COUNT, Validator.EMPTY); registerIndexDynamicSetting(MergeSchedulerConfig.AUTO_THROTTLE, Validator.EMPTY); registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY); @@ -276,8 +205,17 @@ public class ClusterModule extends AbstractModule { indexDynamicSettings.addSetting(setting, validator); } - public void registerClusterDynamicSetting(String setting, Validator validator) { - clusterDynamicSettings.addSetting(setting, validator); + public void 
registerSetting(Setting setting) { + switch (setting.getScope()) { + case Cluster: + if (clusterDynamicSettings.containsKey(setting.getKey())) { + throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); + } + clusterDynamicSettings.put(setting.getKey(), setting); + break; + case Index: + throw new UnsupportedOperationException("not yet implemented"); + } } public void registerAllocationDecider(Class allocationDecider) { @@ -294,7 +232,6 @@ public class ClusterModule extends AbstractModule { @Override protected void configure() { - bind(DynamicSettings.class).annotatedWith(ClusterDynamicSettings.class).toInstance(clusterDynamicSettings.build()); bind(DynamicSettings.class).annotatedWith(IndexDynamicSettings.class).toInstance(indexDynamicSettings.build()); // bind ShardsAllocator @@ -325,5 +262,9 @@ public class ClusterModule extends AbstractModule { bind(NodeIndexDeletedAction.class).asEagerSingleton(); bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); + final ClusterSettingsService clusterSettingsService = new ClusterSettingsService(settings, new ClusterSettings(new HashSet<>(clusterDynamicSettings.values()))); + bind(ClusterSettingsService.class).toInstance(clusterSettingsService); + + } } diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 039868d16c4..13fe2472bd4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -37,11 +37,12 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Setting; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.monitor.fs.FsInfo; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ReceiveTimeoutTransportException; @@ -63,8 +64,8 @@ import java.util.concurrent.TimeUnit; */ public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { - public static final String INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL = "cluster.info.update.interval"; - public static final String INTERNAL_CLUSTER_INFO_TIMEOUT = "cluster.info.update.timeout"; + public static final Setting INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.Cluster); + public static final Setting INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.Cluster); private volatile TimeValue updateFrequency; @@ -82,7 +83,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu private final List listeners = new CopyOnWriteArrayList<>(); @Inject - public InternalClusterInfoService(Settings settings, NodeSettingsService nodeSettingsService, + public InternalClusterInfoService(Settings settings, ClusterSettingsService clusterSettingsService, TransportNodesStatsAction transportNodesStatsAction, TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService, ThreadPool threadPool) { @@ -95,10 +96,12 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu this.transportIndicesStatsAction = 
transportIndicesStatsAction; this.clusterService = clusterService; this.threadPool = threadPool; - this.updateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, TimeValue.timeValueSeconds(30)); - this.fetchTimeout = settings.getAsTime(INTERNAL_CLUSTER_INFO_TIMEOUT, TimeValue.timeValueSeconds(15)); - this.enabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true); - nodeSettingsService.addListener(new ApplySettings()); + this.updateFrequency = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings); + this.fetchTimeout = INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.get(settings); + this.enabled = DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); + clusterSettingsService.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, this::setFetchTimeout); + clusterSettingsService.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, this::setUpdateFrequency); + clusterSettingsService.addSettingsUpdateConsumer(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); // Add InternalClusterInfoService to listen for Master changes this.clusterService.add((LocalNodeMasterListener)this); @@ -106,35 +109,16 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu this.clusterService.add((ClusterStateListener)this); } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - TimeValue newUpdateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, null); - // ClusterInfoService is only enabled if the DiskThresholdDecider is enabled - Boolean newEnabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null); + private void setEnabled(boolean enabled) { + this.enabled = enabled; + } - if (newUpdateFrequency != null) { - if 
(newUpdateFrequency.getMillis() < TimeValue.timeValueSeconds(10).getMillis()) { - logger.warn("[{}] set too low [{}] (< 10s)", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, newUpdateFrequency); - throw new IllegalStateException("Unable to set " + INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL + " less than 10 seconds"); - } else { - logger.info("updating [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, updateFrequency, newUpdateFrequency); - InternalClusterInfoService.this.updateFrequency = newUpdateFrequency; - } - } + private void setFetchTimeout(TimeValue fetchTimeout) { + this.fetchTimeout = fetchTimeout; + } - TimeValue newFetchTimeout = settings.getAsTime(INTERNAL_CLUSTER_INFO_TIMEOUT, null); - if (newFetchTimeout != null) { - logger.info("updating fetch timeout [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_TIMEOUT, fetchTimeout, newFetchTimeout); - InternalClusterInfoService.this.fetchTimeout = newFetchTimeout; - } - - - // We don't log about enabling it here, because the DiskThresholdDecider will already be logging about enable/disable - if (newEnabled != null) { - InternalClusterInfoService.this.enabled = newEnabled; - } - } + void setUpdateFrequency(TimeValue updateFrequency) { + this.updateFrequency = updateFrequency; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index e3925aa6f4e..04fea06138e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -26,11 +26,12 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.concurrent.TimeoutException; @@ -40,30 +41,23 @@ import java.util.concurrent.TimeoutException; */ public class MappingUpdatedAction extends AbstractComponent { - public static final String INDICES_MAPPING_DYNAMIC_TIMEOUT = "indices.mapping.dynamic_timeout"; + public static final Setting INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.Cluster); private IndicesAdminClient client; private volatile TimeValue dynamicMappingUpdateTimeout; - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - TimeValue current = MappingUpdatedAction.this.dynamicMappingUpdateTimeout; - TimeValue newValue = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, current); - if (!current.equals(newValue)) { - logger.info("updating " + INDICES_MAPPING_DYNAMIC_TIMEOUT + " from [{}] to [{}]", current, newValue); - MappingUpdatedAction.this.dynamicMappingUpdateTimeout = newValue; - } - } + @Inject + public MappingUpdatedAction(Settings settings, ClusterSettingsService clusterSettingsService) { + super(settings); + this.dynamicMappingUpdateTimeout = INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.get(settings); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, this::setDynamicMappingUpdateTimeout); } - @Inject - public MappingUpdatedAction(Settings settings, NodeSettingsService nodeSettingsService) { - super(settings); - this.dynamicMappingUpdateTimeout = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, TimeValue.timeValueSeconds(30)); - nodeSettingsService.addListener(new ApplySettings()); + private 
void setDynamicMappingUpdateTimeout(TimeValue dynamicMappingUpdateTimeout) { + this.dynamicMappingUpdateTimeout = dynamicMappingUpdateTimeout; } + public void setClient(Client client) { this.client = client.admin().indices(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 68fa6e45d88..5b8514c22bc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -42,6 +42,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.support.LoggerMessageFormat; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.xcontent.FromXContentBuilder; @@ -140,7 +141,7 @@ public class MetaData implements Iterable, Diffable, Fr } - public static final String SETTING_READ_ONLY = "cluster.blocks.read_only"; + public static final Setting SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.Cluster); public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); @@ -745,25 +746,25 @@ public class MetaData implements Iterable, Diffable, Fr /** All known byte-sized cluster settings. 
*/ public static final Set CLUSTER_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet( - IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, - RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, - RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, - RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC)); + IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey())); /** All known time cluster settings. */ public static final Set CLUSTER_TIME_SETTINGS = unmodifiableSet(newHashSet( - IndicesTTLService.INDICES_TTL_INTERVAL, - RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, - RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, - RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, - RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, - RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, - InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, - InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, - DiscoverySettings.PUBLISH_TIMEOUT, - InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD)); + IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(), + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), + 
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), + DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), + InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey())); /** As of 2.0 we require units for time and byte-sized settings. This methods adds default units to any cluster settings that don't * specify a unit. */ diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index b9ce532a611..e99e5c84ecb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -34,12 +34,14 @@ import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.PriorityComparator; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.ArrayList; import java.util.Collection; @@ -72,42 +74,47 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; */ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { - public static final String SETTING_THRESHOLD = 
"cluster.routing.allocation.balance.threshold"; - public static final String SETTING_INDEX_BALANCE_FACTOR = "cluster.routing.allocation.balance.index"; - public static final String SETTING_SHARD_BALANCE_FACTOR = "cluster.routing.allocation.balance.shard"; - private static final float DEFAULT_INDEX_BALANCE_FACTOR = 0.55f; - private static final float DEFAULT_SHARD_BALANCE_FACTOR = 0.45f; + public static final float DEFAULT_INDEX_BALANCE_FACTOR = 0.55f; + public static final float DEFAULT_SHARD_BALANCE_FACTOR = 0.45f; + public static final float DEFAULT_THRESHOLD = 1.0f; - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - final float indexBalance = settings.getAsFloat(SETTING_INDEX_BALANCE_FACTOR, weightFunction.indexBalance); - final float shardBalance = settings.getAsFloat(SETTING_SHARD_BALANCE_FACTOR, weightFunction.shardBalance); - float threshold = settings.getAsFloat(SETTING_THRESHOLD, BalancedShardsAllocator.this.threshold); - if (threshold <= 0.0f) { - throw new IllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold); - } - BalancedShardsAllocator.this.threshold = threshold; - BalancedShardsAllocator.this.weightFunction = new WeightFunction(indexBalance, shardBalance); - } - } + public static final Setting INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", DEFAULT_INDEX_BALANCE_FACTOR, true, Setting.Scope.Cluster); + public static final Setting SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", DEFAULT_SHARD_BALANCE_FACTOR, true, Setting.Scope.Cluster); + public static final Setting THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", DEFAULT_THRESHOLD, true, Setting.Scope.Cluster); private volatile WeightFunction weightFunction = new WeightFunction(DEFAULT_INDEX_BALANCE_FACTOR, DEFAULT_SHARD_BALANCE_FACTOR); - private volatile 
float threshold = 1.0f; - + private volatile float threshold = DEFAULT_THRESHOLD; public BalancedShardsAllocator(Settings settings) { - this(settings, new NodeSettingsService(settings)); + this(settings, new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); } @Inject - public BalancedShardsAllocator(Settings settings, NodeSettingsService nodeSettingsService) { + public BalancedShardsAllocator(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - ApplySettings applySettings = new ApplySettings(); - applySettings.onRefreshSettings(settings); - nodeSettingsService.addListener(applySettings); + setIndexBalance(INDEX_BALANCE_FACTOR_SETTING.get(settings)); + setShardBalance(SHARD_BALANCE_FACTOR_SETTING.get(settings)); + setThreshold(THRESHOLD_SETTING.get(settings)); + clusterSettingsService.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, this::setIndexBalance); + clusterSettingsService.addSettingsUpdateConsumer(SHARD_BALANCE_FACTOR_SETTING, this::setShardBalance); + clusterSettingsService.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); + } + + public void setIndexBalance(float indexBalance) { + weightFunction = new WeightFunction(indexBalance, weightFunction.shardBalance); + } + + public void setShardBalance(float shardBalanceFactor) { + weightFunction = new WeightFunction(weightFunction.indexBalance, shardBalanceFactor); + } + + public void setThreshold(float threshold) { + if (threshold <= 0.0f) { + throw new IllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold); + } + this.threshold = threshold; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 6f7bbac8aea..450b953fd38 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -24,10 +24,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.HashMap; import java.util.Map; @@ -76,37 +78,12 @@ public class AwarenessAllocationDecider extends AllocationDecider { public static final String NAME = "awareness"; - public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES = "cluster.routing.allocation.awareness.attributes"; - public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP = "cluster.routing.allocation.awareness.force."; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - String[] awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null); - if (awarenessAttributes == null && "".equals(settings.get(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null))) { - awarenessAttributes = Strings.EMPTY_ARRAY; // the empty string resets this - } - if (awarenessAttributes != null) { - logger.info("updating [cluster.routing.allocation.awareness.attributes] from [{}] to [{}]", AwarenessAllocationDecider.this.awarenessAttributes, awarenessAttributes); - AwarenessAllocationDecider.this.awarenessAttributes = 
awarenessAttributes; - } - Map forcedAwarenessAttributes = new HashMap<>(AwarenessAllocationDecider.this.forcedAwarenessAttributes); - Map forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP); - if (!forceGroups.isEmpty()) { - for (Map.Entry entry : forceGroups.entrySet()) { - String[] aValues = entry.getValue().getAsArray("values"); - if (aValues.length > 0) { - forcedAwarenessAttributes.put(entry.getKey(), aValues); - } - } - } - AwarenessAllocationDecider.this.forcedAwarenessAttributes = forcedAwarenessAttributes; - } - } + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "_na_", "", Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.Cluster); private String[] awarenessAttributes; - private Map forcedAwarenessAttributes; + private volatile Map forcedAwarenessAttributes; /** * Creates a new {@link AwarenessAllocationDecider} instance @@ -121,24 +98,28 @@ public class AwarenessAllocationDecider extends AllocationDecider { * @param settings {@link Settings} to use */ public AwarenessAllocationDecider(Settings settings) { - this(settings, new NodeSettingsService(settings)); + this(settings, new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); } @Inject - public AwarenessAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public AwarenessAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - this.awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES); + this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); + 
clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes); + setForcedAwarenessAttributes(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings)); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, this::setForcedAwarenessAttributes); + } - forcedAwarenessAttributes = new HashMap<>(); - Map forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP); + private void setForcedAwarenessAttributes(Settings forceSettings) { + Map forcedAwarenessAttributes = new HashMap<>(); + Map forceGroups = forceSettings.getAsGroups(); for (Map.Entry entry : forceGroups.entrySet()) { String[] aValues = entry.getValue().getAsArray("values"); if (aValues.length > 0) { forcedAwarenessAttributes.put(entry.getKey(), aValues); } } - - nodeSettingsService.addListener(new ApplySettings()); + this.forcedAwarenessAttributes = forcedAwarenessAttributes; } /** @@ -150,6 +131,10 @@ public class AwarenessAllocationDecider extends AllocationDecider { return this.awarenessAttributes; } + private void setAwarenessAttributes(String[] awarenessAttributes) { + this.awarenessAttributes = awarenessAttributes; + } + @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return underCapacity(shardRouting, node, allocation, true); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 7638c7aeee8..f20489a795e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -19,13 +19,12 @@ package 
org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.Locale; @@ -49,19 +48,10 @@ import java.util.Locale; public class ClusterRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "cluster_rebalance"; - - public static final String CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE = "cluster.routing.allocation.allow_rebalance"; - public static final Validator ALLOCATION_ALLOW_REBALANCE_VALIDATOR = (setting, value, clusterState) -> { - try { - ClusterRebalanceType.parseString(value); - return null; - } catch (IllegalArgumentException e) { - return "the value of " + setting + " must be one of: [always, indices_primaries_active, indices_all_active]"; - } - }; + public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", "_na_", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.Cluster); /** - * An enum representation for the configured re-balance type. + * An enum representation for the configured re-balance type. */ public static enum ClusterRebalanceType { /** @@ -73,7 +63,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { */ INDICES_PRIMARIES_ACTIVE, /** - * Re-balancing is allowed only once all shards on all indices are active. + * Re-balancing is allowed only once all shards on all indices are active. 
*/ INDICES_ALL_ACTIVE; @@ -85,48 +75,28 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { } else if ("indices_all_active".equalsIgnoreCase(typeString) || "indicesAllActive".equalsIgnoreCase(typeString)) { return ClusterRebalanceType.INDICES_ALL_ACTIVE; } - throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE + ": " + typeString); + throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING + ": " + typeString); } } private ClusterRebalanceType type; @Inject - public ClusterRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public ClusterRebalanceAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - String allowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "indices_all_active"); try { - type = ClusterRebalanceType.parseString(allowRebalance); + type = CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(settings); } catch (IllegalStateException e) { - logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, allowRebalance); + logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getRaw(settings)); type = ClusterRebalanceType.INDICES_ALL_ACTIVE; } - logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type.toString().toLowerCase(Locale.ROOT)); + logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, type.toString().toLowerCase(Locale.ROOT)); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType); } - class ApplySettings implements NodeSettingsService.Listener { - - 
@Override - public void onRefreshSettings(Settings settings) { - String newAllowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, null); - if (newAllowRebalance != null) { - ClusterRebalanceType newType = null; - try { - newType = ClusterRebalanceType.parseString(newAllowRebalance); - } catch (IllegalArgumentException e) { - // ignore - } - - if (newType != null && newType != ClusterRebalanceAllocationDecider.this.type) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, - ClusterRebalanceAllocationDecider.this.type.toString().toLowerCase(Locale.ROOT), - newType.toString().toLowerCase(Locale.ROOT)); - ClusterRebalanceAllocationDecider.this.type = newType; - } - } - } + public void setType(ClusterRebalanceType type) { + this.type = type; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index 6bd1b437acf..040c4481951 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -22,8 +22,9 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; /** * Similar to the {@link ClusterRebalanceAllocationDecider} this @@ -41,27 +42,19 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { public 
static final String NAME = "concurrent_rebalance"; - public static final String CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE = "cluster.routing.allocation.cluster_concurrent_rebalance"; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - int clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance); - if (clusterConcurrentRebalance != ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance) { - logger.info("updating [cluster.routing.allocation.cluster_concurrent_rebalance] from [{}], to [{}]", ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance, clusterConcurrentRebalance); - ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance = clusterConcurrentRebalance; - } - } - } - + public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, true, Setting.Scope.Cluster); private volatile int clusterConcurrentRebalance; @Inject - public ConcurrentRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public ConcurrentRebalanceAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - this.clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 2); + this.clusterConcurrentRebalance = CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings); logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, this::setClusterConcurrentRebalance); + } + + public void 
setClusterConcurrentRebalance(int concurrentRebalance) { + clusterConcurrentRebalance = concurrentRebalance; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index a02c72c5745..40a8442ca3a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -33,15 +33,17 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.RatioValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.Set; @@ -80,53 +82,11 @@ public class DiskThresholdDecider extends AllocationDecider { private volatile boolean enabled; private volatile TimeValue rerouteInterval; - public static final String CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED = "cluster.routing.allocation.disk.threshold_enabled"; - public static final String CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK = "cluster.routing.allocation.disk.watermark.low"; - public static final String CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK = 
"cluster.routing.allocation.disk.watermark.high"; - public static final String CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS = "cluster.routing.allocation.disk.include_relocations"; - public static final String CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL = "cluster.routing.allocation.disk.reroute_interval"; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - String newLowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, null); - String newHighWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, null); - Boolean newRelocationsSetting = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, null); - Boolean newEnableSetting = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null); - TimeValue newRerouteInterval = settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, null); - - if (newEnableSetting != null) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, - DiskThresholdDecider.this.enabled, newEnableSetting); - DiskThresholdDecider.this.enabled = newEnableSetting; - } - if (newRelocationsSetting != null) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, - DiskThresholdDecider.this.includeRelocations, newRelocationsSetting); - DiskThresholdDecider.this.includeRelocations = newRelocationsSetting; - } - if (newLowWatermark != null) { - if (!validWatermarkSetting(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) { - throw new ElasticsearchParseException("unable to parse low watermark [{}]", newLowWatermark); - } - logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, newLowWatermark); - DiskThresholdDecider.this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(newLowWatermark); - DiskThresholdDecider.this.freeBytesThresholdLow = 
thresholdBytesFromWatermark(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK); - } - if (newHighWatermark != null) { - if (!validWatermarkSetting(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) { - throw new ElasticsearchParseException("unable to parse high watermark [{}]", newHighWatermark); - } - logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, newHighWatermark); - DiskThresholdDecider.this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(newHighWatermark); - DiskThresholdDecider.this.freeBytesThresholdHigh = thresholdBytesFromWatermark(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK); - } - if (newRerouteInterval != null) { - logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, newRerouteInterval); - DiskThresholdDecider.this.rerouteInterval = newRerouteInterval; - } - } - } + public static final Setting CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "_na_", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "_na_", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING =
Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.Cluster); /** * Listens for a node to go over the high watermark and kicks off an empty @@ -231,38 +191,49 @@ public class DiskThresholdDecider extends AllocationDecider { // It's okay the Client is null here, because the empty cluster info // service will never actually call the listener where the client is // needed. Also this constructor is only used for tests - this(settings, new NodeSettingsService(settings), EmptyClusterInfoService.INSTANCE, null); + this(settings, new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), EmptyClusterInfoService.INSTANCE, null); } @Inject - public DiskThresholdDecider(Settings settings, NodeSettingsService nodeSettingsService, ClusterInfoService infoService, Client client) { + public DiskThresholdDecider(Settings settings, ClusterSettingsService clusterSettingsService, ClusterInfoService infoService, Client client) { super(settings); - String lowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "85%"); - String highWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "90%"); - - if (!validWatermarkSetting(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) { - throw new ElasticsearchParseException("unable to parse low watermark [{}]", lowWatermark); - } - if (!validWatermarkSetting(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) { - throw new ElasticsearchParseException("unable to parse high watermark [{}]", highWatermark); - } - // Watermark is expressed in terms of used data, but we need "free" data watermark - this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark); - this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark); - - this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, 
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK); - this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK); - this.includeRelocations = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true); - this.rerouteInterval = settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, TimeValue.timeValueSeconds(60)); - - this.enabled = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true); - nodeSettingsService.addListener(new ApplySettings()); + final String lowWatermark = CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.get(settings); + final String highWatermark = CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.get(settings); + setHighWatermark(highWatermark); + setLowWatermark(lowWatermark); + this.includeRelocations = CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.get(settings); + this.rerouteInterval = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(settings); + this.enabled = CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, this::setLowWatermark); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, this::setHighWatermark); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, this::setIncludeRelocations); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, this::setRerouteInterval); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); infoService.addListener(new DiskListener(client)); } - // For Testing - ApplySettings newApplySettings() { - return new ApplySettings(); + private void setIncludeRelocations(boolean includeRelocations) { + this.includeRelocations = 
includeRelocations; + } + + private void setRerouteInterval(TimeValue rerouteInterval) { + this.rerouteInterval = rerouteInterval; + } + + private void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + private void setLowWatermark(String lowWatermark) { + // Watermark is expressed in terms of used data, but we need "free" data watermark + this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark); + this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey()); + } + + private void setHighWatermark(String highWatermark) { + // Watermark is expressed in terms of used data, but we need "free" data watermark + this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark); + this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey()); } // For Testing @@ -582,18 +553,19 @@ public class DiskThresholdDecider extends AllocationDecider { * Checks if a watermark string is a valid percentage or byte size value, * returning true if valid, false if invalid. 
*/ - public boolean validWatermarkSetting(String watermark, String settingName) { + public static String validWatermarkSetting(String watermark, String settingName) { try { RatioValue.parseRatioValue(watermark); - return true; } catch (ElasticsearchParseException e) { try { ByteSizeValue.parseBytesSizeValue(watermark, settingName); - return true; } catch (ElasticsearchParseException ex) { - return false; + ex.addSuppressed(e); + throw ex; } } + return watermark; + } private Decision earlyTerminate(RoutingAllocation allocation, ImmutableOpenMap usages) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 0bbd4935044..38183fab830 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -23,14 +23,15 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.Locale; /** - * This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE} / - * {@link #CLUSTER_ROUTING_REBALANCE_ENABLE} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}. 
+ * This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / + * {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}. * The per index settings overrides the cluster wide setting. * *

@@ -54,26 +55,34 @@ import java.util.Locale; * @see Rebalance * @see Allocation */ -public class EnableAllocationDecider extends AllocationDecider implements NodeSettingsService.Listener { +public class EnableAllocationDecider extends AllocationDecider { public static final String NAME = "enable"; - public static final String CLUSTER_ROUTING_ALLOCATION_ENABLE = "cluster.routing.allocation.enable"; - public static final String INDEX_ROUTING_ALLOCATION_ENABLE = "index.routing.allocation.enable"; + public static final Setting CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", "_na_", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.Cluster); + public static final String INDEX_ROUTING_ALLOCATION_ENABLE= "index.routing.allocation.enable"; - public static final String CLUSTER_ROUTING_REBALANCE_ENABLE = "cluster.routing.rebalance.enable"; + public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", "_na_", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.Cluster); public static final String INDEX_ROUTING_REBALANCE_ENABLE = "index.routing.rebalance.enable"; private volatile Rebalance enableRebalance; private volatile Allocation enableAllocation; - @Inject - public EnableAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public EnableAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - this.enableAllocation = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.ALL.name())); - this.enableRebalance = Rebalance.parse(settings.get(CLUSTER_ROUTING_REBALANCE_ENABLE, Rebalance.ALL.name())); - nodeSettingsService.addListener(this); + this.enableAllocation = CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(settings); + this.enableRebalance = CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.get(settings); + 
clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, this::setEnableAllocation); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance); + } + + public void setEnableRebalance(Rebalance enableRebalance) { + this.enableRebalance = enableRebalance; + } + + public void setEnableAllocation(Allocation enableAllocation) { + this.enableAllocation = enableAllocation; } @Override @@ -148,25 +157,9 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe } } - @Override - public void onRefreshSettings(Settings settings) { - final Allocation enable = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, this.enableAllocation.name())); - if (enable != this.enableAllocation) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_ENABLE, this.enableAllocation, enable); - EnableAllocationDecider.this.enableAllocation = enable; - } - - final Rebalance enableRebalance = Rebalance.parse(settings.get(CLUSTER_ROUTING_REBALANCE_ENABLE, this.enableRebalance.name())); - if (enableRebalance != this.enableRebalance) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_REBALANCE_ENABLE, this.enableRebalance, enableRebalance); - EnableAllocationDecider.this.enableRebalance = enableRebalance; - } - - } - /** * Allocation values or rather their string representation to be used used with - * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE} + * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE} * via cluster / index settings. 
*/ public enum Allocation { @@ -192,7 +185,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe /** * Rebalance values or rather their string representation to be used used with - * {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE} + * {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE} * via cluster / index settings. */ public enum Rebalance { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index e0e2caaf04a..f9ee7ad8182 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -25,10 +25,9 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; - -import java.util.Map; +import org.elasticsearch.common.settings.ClusterSettingsService; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; @@ -65,36 +64,23 @@ public class FilterAllocationDecider extends AllocationDecider { public static final String INDEX_ROUTING_INCLUDE_GROUP = "index.routing.allocation.include."; public static final String INDEX_ROUTING_EXCLUDE_GROUP = "index.routing.allocation.exclude."; - public static final String 
CLUSTER_ROUTING_REQUIRE_GROUP = "cluster.routing.allocation.require."; - public static final String CLUSTER_ROUTING_INCLUDE_GROUP = "cluster.routing.allocation.include."; - public static final String CLUSTER_ROUTING_EXCLUDE_GROUP = "cluster.routing.allocation.exclude."; + public static final Setting CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.Cluster); private volatile DiscoveryNodeFilters clusterRequireFilters; private volatile DiscoveryNodeFilters clusterIncludeFilters; private volatile DiscoveryNodeFilters clusterExcludeFilters; @Inject - public FilterAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public FilterAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - Map requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap(); - if (requireMap.isEmpty()) { - clusterRequireFilters = null; - } else { - clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap); - } - Map includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap(); - if (includeMap.isEmpty()) { - clusterIncludeFilters = null; - } else { - clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap); - } - Map excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap(); - if (excludeMap.isEmpty()) { - clusterExcludeFilters = null; - } else { - clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap); - } - nodeSettingsService.addListener(new ApplySettings()); + 
setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.get(settings)); + setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.get(settings)); + setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.get(settings)); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters); } @Override @@ -144,21 +130,13 @@ public class FilterAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters"); } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - Map requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap(); - if (!requireMap.isEmpty()) { - clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap); - } - Map includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap(); - if (!includeMap.isEmpty()) { - clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap); - } - Map excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap(); - if (!excludeMap.isEmpty()) { - clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap); - } - } + private void setClusterRequireFilters(Settings settings) { + clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, settings.getAsMap()); + } + private void setClusterIncludeFilters(Settings settings) { + clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, settings.getAsMap()); + } + private void setClusterExcludeFilters(Settings settings) { + clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, 
settings.getAsMap()); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index 3d68ed50d27..ecd03d92c70 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -24,16 +24,16 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; /** * This {@link AllocationDecider} limits the number of shards per node on a per * index or node-wide basis. The allocator prevents a single node to hold more * than {@value #INDEX_TOTAL_SHARDS_PER_NODE} per index and - * {@value #CLUSTER_TOTAL_SHARDS_PER_NODE} globally during the allocation + * cluster.routing.allocation.total_shards_per_node globally during the allocation * process. The limits of this decider can be changed in real-time via a the * index settings API. *

@@ -64,26 +64,18 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { * Controls the maximum number of shards per node on a global level. * Negative values are interpreted as unlimited. */ - public static final String CLUSTER_TOTAL_SHARDS_PER_NODE = "cluster.routing.allocation.total_shards_per_node"; + public static final Setting CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, true, Setting.Scope.Cluster); - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - Integer newClusterLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, null); - - if (newClusterLimit != null) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_TOTAL_SHARDS_PER_NODE, - ShardsLimitAllocationDecider.this.clusterShardLimit, newClusterLimit); - ShardsLimitAllocationDecider.this.clusterShardLimit = newClusterLimit; - } - } - } @Inject - public ShardsLimitAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public ShardsLimitAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - this.clusterShardLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, -1); - nodeSettingsService.addListener(new ApplySettings()); + this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit); + } + + private void setClusterShardLimit(int clusterShardLimit) { + this.clusterShardLimit = clusterShardLimit; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index 37b9f9f461b..fa447626e63 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -23,9 +23,11 @@ import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; /** * This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that @@ -38,18 +40,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { /** * Disables relocation of shards that are currently being snapshotted. 
*/ - public static final String CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED = "cluster.routing.allocation.snapshot.relocation_enabled"; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - boolean newEnableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation); - if (newEnableRelocation != enableRelocation) { - logger.info("updating [{}] from [{}], to [{}]", CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation, newEnableRelocation); - enableRelocation = newEnableRelocation; - } - } - } + public static final Setting CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.Cluster); private volatile boolean enableRelocation = false; @@ -66,14 +57,18 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { * @param settings {@link org.elasticsearch.common.settings.Settings} to use */ public SnapshotInProgressAllocationDecider(Settings settings) { - this(settings, new NodeSettingsService(settings)); + this(settings, new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); } @Inject - public SnapshotInProgressAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public SnapshotInProgressAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - enableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation); - nodeSettingsService.addListener(new ApplySettings()); + enableRelocation = CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING.get(settings); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, this::setEnableRelocation); + } + 
+ private void setEnableRelocation(boolean enableRelocation) { + this.enableRelocation = enableRelocation; } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index ed6814d83af..543f0bf780b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -21,11 +21,11 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; /** * {@link ThrottlingAllocationDecider} controls the recovery process per node in @@ -47,27 +47,33 @@ import org.elasticsearch.node.settings.NodeSettingsService; */ public class ThrottlingAllocationDecider extends AllocationDecider { - public static final String NAME = "throttling"; - - public static final String CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = "cluster.routing.allocation.node_initial_primaries_recoveries"; - public static final String CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = "cluster.routing.allocation.node_concurrent_recoveries"; - public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries"; - public static final int 
DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2; public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4; + public static final String NAME = "throttling"; + public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries"; + + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", "_na_", (s) -> s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES,Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), Integer::parseInt, true, Setting.Scope.Cluster); private volatile int primariesInitialRecoveries; private volatile int concurrentRecoveries; @Inject - public ThrottlingAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public ThrottlingAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - - this.primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES); - this.concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)); + this.primariesInitialRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.get(settings); + this.concurrentRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.get(settings); logger.debug("using 
node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, this::setPrimariesInitialRecoveries); + clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, this::setConcurrentRecoveries); + } - nodeSettingsService.addListener(new ApplySettings()); + private void setConcurrentRecoveries(int concurrentRecoveries) { + this.concurrentRecoveries = concurrentRecoveries; + } + + private void setPrimariesInitialRecoveries(int primariesInitialRecoveries) { + this.primariesInitialRecoveries = primariesInitialRecoveries; } @Override @@ -115,21 +121,4 @@ public class ThrottlingAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "below shard recovery limit of [%d]", concurrentRecoveries); } } - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - int primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, ThrottlingAllocationDecider.this.primariesInitialRecoveries); - if (primariesInitialRecoveries != ThrottlingAllocationDecider.this.primariesInitialRecoveries) { - logger.info("updating [cluster.routing.allocation.node_initial_primaries_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.primariesInitialRecoveries, primariesInitialRecoveries); - ThrottlingAllocationDecider.this.primariesInitialRecoveries = primariesInitialRecoveries; - } - - int concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, ThrottlingAllocationDecider.this.concurrentRecoveries); - if (concurrentRecoveries != ThrottlingAllocationDecider.this.concurrentRecoveries) { - logger.info("updating [cluster.routing.allocation.node_concurrent_recoveries] 
from [{}] to [{}]", ThrottlingAllocationDecider.this.concurrentRecoveries, concurrentRecoveries); - ThrottlingAllocationDecider.this.concurrentRecoveries = concurrentRecoveries; - } - } - } } diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index d4b15861846..56bf400d46b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.transport.TransportAddress; @@ -46,7 +47,7 @@ import org.elasticsearch.common.util.concurrent.*; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryService; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -62,7 +63,7 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF */ public class InternalClusterService extends AbstractLifecycleComponent implements ClusterService { - public static final String SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD = "cluster.service.slow_task_logging_threshold"; + public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", 
TimeValue.timeValueSeconds(30), true, Setting.Scope.Cluster); public static final String SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL = "cluster.service.reconnect_interval"; public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; @@ -74,7 +75,7 @@ public class InternalClusterService extends AbstractLifecycleComponent> groupSettings = new HashMap<>(); + private final Map> keySettings = new HashMap<>(); + private final Settings defaults; + + public ClusterSettings(Set> settingsSet) { + Settings.Builder builder = Settings.builder(); + for (Setting entry : settingsSet) { + if (entry.getScope() != Setting.Scope.Cluster) { + throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope()); + } + if (entry.isGroupSetting()) { + groupSettings.put(entry.getKey(), entry); + } else { + keySettings.put(entry.getKey(), entry); + } + builder.put(entry.getKey(), entry.getDefault(Settings.EMPTY)); + } + this.defaults = builder.build(); + } + + public ClusterSettings() { + this(BUILT_IN_CLUSTER_SETTINGS); + } + + /** + * Returns the {@link Setting} for the given key or null if the setting can not be found. + */ + public Setting get(String key) { + Setting setting = keySettings.get(key); + if (setting == null) { + for (Map.Entry> entry : groupSettings.entrySet()) { + if (entry.getValue().match(key)) { + return entry.getValue(); + } + } + } else { + return setting; + } + return null; + } + + /** + * Returns true if the setting for the given key is dynamically updateable. Otherwise false. + */ + public boolean hasDynamicSetting(String key) { + final Setting setting = get(key); + return setting != null && setting.isDynamic(); + } + + /** + * Returns true if the settings is a logger setting. 
+ */ + public boolean isLoggerSetting(String key) { + return key.startsWith("logger."); + } + + /** + * Returns the cluster settings defaults + */ + public Settings getDefaults() { + return defaults; + } + + public static Set> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, + AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, + BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.THRESHOLD_SETTING, + ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, + ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, + ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, + IndicesTTLService.INDICES_TTL_INTERVAL_SETTING, + MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, + MetaData.SETTING_READ_ONLY_SETTING, + RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING, + RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS_SETTING, + RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE_SETTING, + RecoverySettings.INDICES_RECOVERY_COMPRESS_SETTING, + RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING, + RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING, + RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, + 
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, + RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, + ThreadPool.THREADPOOL_GROUP_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, + SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, + DestructiveOperations.REQUIRES_NAME_SETTING, + DiscoverySettings.PUBLISH_TIMEOUT_SETTING, + DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING, + DiscoverySettings.COMMIT_TIMEOUT_SETTING, + DiscoverySettings.NO_MASTER_BLOCK_SETTING, + HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, + InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, + TransportService.TRACE_LOG_EXCLUDE_SETTING, + 
TransportService.TRACE_LOG_INCLUDE_SETTING, + TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, + ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING))); +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettingsService.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettingsService.java new file mode 100644 index 00000000000..e66844d4e88 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettingsService.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Predicate; + +/** + * A service that allows to register for node settings change that can come from cluster + * events holding new settings. 
+ */ +public final class ClusterSettingsService extends SettingsService { + private final ClusterSettings clusterSettings; + + @Inject + public ClusterSettingsService(Settings settings, ClusterSettings clusterSettings) { + super(settings); + this.clusterSettings = clusterSettings; + } + + protected Setting getSetting(String key) { + return this.clusterSettings.get(key); + } + + public ClusterSettings getClusterSettings() { + return clusterSettings; + } +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java new file mode 100644 index 00000000000..169867ef384 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -0,0 +1,394 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common.settings; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.support.ToXContentToBytes; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.MemorySizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.*; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Predicate; + +/** + */ +public class Setting extends ToXContentToBytes { + private final String key; + private final String description; + private final Function defaultValue; + private final Function parser; + private final boolean dynamic; + private final Scope scope; + + public Setting(String key, String description, Function defaultValue, Function parser, boolean dynamic, Scope scope) { + this.key = key; + this.description = description; + this.defaultValue = defaultValue; + this.parser = parser; + this.dynamic = dynamic; + this.scope = scope; + } + + /** + * Returns the settings key or a prefix if this setting is a group setting + * @see #isGroupSetting() + */ + public String getKey() { + return key; + } + + /** + * Returns a human readable description of this setting + */ + public String getDescription() { + return description; + } + + /** + * Returns true iff this setting is dynamically updateable, otherwise false + */ + public boolean isDynamic() { + return dynamic; + } + + /** + * Returns the settings scope + */ + public Scope getScope() { + return scope; + } + + /** + * Returns true iff this setting is a group setting. Group settings represent a set of settings + * rather than a single value. 
The key, see {@link #getKey()}, in contrast to non-group settings is a prefix like cluster.store. + * that matches all settings with this prefix. + */ + public boolean isGroupSetting() { + return false; + } + + /** + * Returns the default values string representation for this setting. + * @param settings a settings object for settings that has a default value depending on another setting if available + */ + public String getDefault(Settings settings) { + return defaultValue.apply(settings); + } + + /** + * Returns true iff this setting is present in the given settings object. Otherwise false + */ + public boolean exists(Settings settings) { + return settings.get(key) != null; + } + + /** + * Returns the settings value. If the setting is not present in the given settings object the default value is returned + * instead. + */ + public T get(Settings settings) { + String value = getRaw(settings); + try { + return parser.apply(value); + } catch (ElasticsearchParseException ex) { + throw ex; + } catch (Throwable t) { + throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", t); + } + } + + /** + * Returns the raw (string) settings value. If the setting is not present in the given settings object the default value is returned + * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value. + */ + public String getRaw(Settings settings) { + return settings.get(key, defaultValue.apply(settings)); + } + + /** + * Returns true iff the given key matches the settings key or if this setting is a group setting if the + * given key is part of the settings group. 
+ * @see #isGroupSetting() + */ + public boolean match(String toTest) { + return Regex.simpleMatch(key, toTest); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("key", key); + builder.field("description", description); + builder.field("type", scope.name()); + builder.field("dynamic", dynamic); + builder.field("default", defaultValue.apply(Settings.EMPTY)); + builder.endObject(); + return builder; + } + + /** + * The settings scope - settings can either be cluster settings or per index settings. + */ + public enum Scope { + Cluster, + Index; + } + + SettingsService.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings) { + return newUpdater(consumer, logger, settings, (s) -> true); + } + + SettingsService.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Predicate accept) { + if (isDynamic()) { + return new Updater(consumer, logger, settings, accept); + } else { + throw new IllegalStateException("setting [" + getKey() + "] is not dynamic"); + } + } + + static SettingsService.SettingUpdater compoundUpdater(final BiConsumer consumer, final Setting aSettting, final Setting bSetting, ESLogger logger, Settings settings) { + final AtomicReference aRef = new AtomicReference<>(); + final AtomicReference bRef = new AtomicReference<>(); + final SettingsService.SettingUpdater aSettingUpdater = aSettting.newUpdater(aRef::set, logger, settings); + final SettingsService.SettingUpdater bSettingUpdater = bSetting.newUpdater(bRef::set, logger, settings); + return new SettingsService.SettingUpdater() { + boolean aHasChanged = false; + boolean bHasChanged = false; + @Override + public boolean prepareApply(Settings settings) { + aHasChanged = aSettingUpdater.prepareApply(settings); + bHasChanged = bSettingUpdater.prepareApply(settings); + return aHasChanged || bHasChanged; + } + + @Override + public void apply() { + 
aSettingUpdater.apply(); + bSettingUpdater.apply(); + if (aHasChanged || bHasChanged) { + consumer.accept(aRef.get(), bRef.get()); + } + } + + @Override + public void rollback() { + try { + aRef.set(null); + aSettingUpdater.rollback(); + } finally { + bRef.set(null); + bSettingUpdater.rollback(); + } + } + + @Override + public String toString() { + return "CompoundUpdater for: " + aSettingUpdater + " and " + bSettingUpdater; + } + }; + } + + + private class Updater implements SettingsService.SettingUpdater { + private final Consumer consumer; + private final ESLogger logger; + private final Predicate accept; + private String value; + private boolean commitPending; + private String pendingValue; + private T valueInstance; + + public Updater(Consumer consumer, ESLogger logger, Settings settings, Predicate accept) { + this.consumer = consumer; + this.logger = logger; + value = getRaw(settings); + this.accept = accept; + } + + + public boolean prepareApply(Settings settings) { + String newValue = settings.get(key); + if (newValue == null) { + newValue = getRaw(settings); + } + if (value.equals(newValue) == false) { + T inst = get(settings); + if (accept.test(inst) == false) { + throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + value + "] to [" + getRaw(settings) + "]"); + } + logger.info("update [{}] from [{}] to [{}]", key, value, getRaw(settings)); + pendingValue = newValue; + valueInstance = inst; + commitPending = true; + + } else { + commitPending = false; + } + return commitPending; + } + + public void apply() { + if (commitPending) { + value = pendingValue; + consumer.accept(valueInstance); + } + commitPending = false; + valueInstance = null; + pendingValue = null; + } + + public void rollback() { + commitPending = false; + valueInstance = null; + pendingValue = null; + } + + @Override + public String toString() { + return "Updater for: " + Setting.this.toString(); + } + } + + + public Setting(String key, String description, 
String defaultValue, Function parser, boolean dynamic, Scope scope) { + this(key, description, (s) -> defaultValue, parser, dynamic, scope); + } + + public static Setting floatSetting(String key, float defaultValue, boolean dynamic, Scope scope) { + return new Setting<>(key, "_na_", (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope); + } + + public static Setting intSetting(String key, int defaultValue, boolean dynamic, Scope scope) { + return new Setting<>(key, "_na_", (s) -> Integer.toString(defaultValue), Integer::parseInt, dynamic, scope); + } + + public static Setting boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) { + return new Setting<>(key, "_na_", (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope); + } + + public static Setting byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) { + return new Setting<>(key, "_na_", (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope); + } + + public static Setting byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) { + return new Setting<>(key, "_na_", (s) -> value.toString(), (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope); + } + + public static Setting positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { + return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), dynamic, scope); + } + + public static Setting groupSetting(String key, boolean dynamic, Scope scope) { + String prefix = key.endsWith(".") ? 
key : key + "."; + return new Setting(key, "_na_", "", (s) -> null, dynamic, scope) { + + @Override + public boolean isGroupSetting() { + return true; + } + + @Override + public Settings get(Settings settings) { + return settings.getByPrefix(prefix); + } + + @Override + public boolean match(String toTest) { + return Regex.simpleMatch(prefix + "*", toTest); + } + + @Override + public SettingsService.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Predicate accept) { + if (isDynamic() == false) { + throw new IllegalStateException("setting [" + getKey() + "] is not dynamic"); + } + final Setting setting = this; + return new SettingsService.SettingUpdater() { + private Settings pendingSettings; + private Settings committedSettings = get(settings); + + @Override + public boolean prepareApply(Settings settings) { + Settings currentSettings = get(settings); + if (currentSettings.equals(committedSettings) == false) { + if (accept.test(currentSettings) == false) { + throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + committedSettings.getAsMap() + "] to [" + currentSettings.getAsMap() + "]"); + } + pendingSettings = currentSettings; + return true; + } else { + return false; + } + } + + @Override + public void apply() { + if (pendingSettings != null) { + consumer.accept(pendingSettings); + committedSettings = pendingSettings; + } + pendingSettings = null; + } + + @Override + public void rollback() { + pendingSettings = null; + } + + @Override + public String toString() { + return "Updater for: " + setting.toString(); + } + }; + } + }; + } + + public static Setting timeSetting(String key, Function defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { + return new Setting<>(key, "_na_", defaultValue, (s) -> { + TimeValue timeValue = TimeValue.parseTimeValue(s, null, key); + if (timeValue.millis() < minValue.millis()) { + throw new ElasticsearchParseException("Failed to parse value [" + s + "] for 
setting [" + key + "] must be >= " + minValue); + } + return timeValue; + }, dynamic, scope); + } + + public static Setting timeSetting(String key, TimeValue defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { + return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, dynamic, scope); + } + + public static Setting timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { + return new Setting<>(key, "_na_", (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, defaultValue, key), dynamic, scope); + } + + public static Setting nonNegativeDouble(String key, double defaultValue, boolean dynamic, Scope scope) { + return new Setting<>(key, "_na_", (s) -> Double.toString(defaultValue), Double::parseDouble, dynamic, scope); + } + +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index 5e083a9e740..aae4cb2b54d 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -614,6 +614,9 @@ public final class Settings implements ToXContent { if (settingPrefix.charAt(settingPrefix.length() - 1) != '.') { settingPrefix = settingPrefix + "."; } + return getGroupsInternal(settingPrefix, ignoreNonGrouped); + } + private Map getGroupsInternal(String settingPrefix, boolean ignoreNonGrouped) throws SettingsException { // we don't really care that it might happen twice Map> map = new LinkedHashMap<>(); for (Object o : settings.keySet()) { @@ -643,6 +646,12 @@ public final class Settings implements ToXContent { } return Collections.unmodifiableMap(retVal); } + /** + * Returns group settings for the given setting prefix. + */ + public Map getAsGroups() throws SettingsException { + return getGroupsInternal("", false); + } /** * Returns a parsed version. 
diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsService.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsService.java new file mode 100644 index 00000000000..b1a1319e800 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsService.java @@ -0,0 +1,201 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Predicate; + +/** + * A basic setting service that can be used for per-index and per-cluster settings. + * This service offers transactional application of settings updates.
+ */ +public abstract class SettingsService extends AbstractComponent { + private Settings lastSettingsApplied; + private final List settingUpdaters = new ArrayList<>(); + + protected SettingsService(Settings settings) { + super(settings); + } + + /** + * Applies the given settings to all listeners and rolls back the result after application. This + * method will not change any settings but will fail if any of the settings can't be applied. + */ + public synchronized Settings dryRun(Settings settings) { + final Settings build = Settings.builder().put(this.settings).put(settings).build(); + try { + List exceptions = new ArrayList<>(); + for (SettingUpdater settingUpdater : settingUpdaters) { + try { + settingUpdater.prepareApply(build); + } catch (RuntimeException ex) { + exceptions.add(ex); + logger.debug("failed to prepareCommit settings for [{}]", ex, settingUpdater); + } + } + // here we are exhaustive and record all settings that failed. + ExceptionsHelper.rethrowAndSuppress(exceptions); + } finally { + for (SettingUpdater settingUpdater : settingUpdaters) { + try { + settingUpdater.rollback(); + } catch (Exception e) { + logger.warn("failed to rollback settings for [{}]", e, settingUpdater); + } + } + } + return build; + } + + /** + * Applies the given settings to all the settings consumers or to none of them. The settings + * will be merged with the node settings before they are applied while given settings override existing node + * settings. 
+ * @param settings the settings to apply + * @return the unmerged applied settings + */ + public synchronized Settings applySettings(Settings settings) { + if (lastSettingsApplied != null && settings.equals(lastSettingsApplied)) { + // nothing changed in the settings, ignore + return settings; + } + final Settings build = Settings.builder().put(this.settings).put(settings).build(); + boolean success = false; + try { + for (SettingUpdater settingUpdater : settingUpdaters) { + try { + settingUpdater.prepareApply(build); + } catch (Exception ex) { + logger.warn("failed to prepareCommit settings for [{}]", ex, settingUpdater); + throw ex; + } + } + for (SettingUpdater settingUpdater : settingUpdaters) { + settingUpdater.apply(); + } + success = true; + } catch (Exception ex) { + logger.warn("failed to apply settings", ex); + throw ex; + } finally { + if (success == false) { + for (SettingUpdater settingUpdater : settingUpdaters) { + try { + settingUpdater.rollback(); + } catch (Exception e) { + logger.warn("failed to refresh settings for [{}]", e, settingUpdater); + } + } + } + } + + try { + for (Map.Entry entry : settings.getAsMap().entrySet()) { + if (entry.getKey().startsWith("logger.")) { + String component = entry.getKey().substring("logger.".length()); + if ("_root".equals(component)) { + ESLoggerFactory.getRootLogger().setLevel(entry.getValue()); + } else { + ESLoggerFactory.getLogger(component).setLevel(entry.getValue()); + } + } + } + } catch (Exception e) { + logger.warn("failed to refresh settings for [{}]", e, "logger"); + } + + return lastSettingsApplied = settings; + } + + /** + * Adds a settings consumer with a predicate that is only evaluated at update time. + *

+ * Note: Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically. + *

+ */ + public synchronized void addSettingsUpdateConsumer(Setting setting, Consumer consumer, Predicate predicate) { + if (setting != getSetting(setting.getKey())) { + throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); + } + this.settingUpdaters.add(setting.newUpdater(consumer, logger, settings, predicate)); + } + + /** + * Adds a settings consumer that accepts the values for two settings. The consumer is only notified if one or both settings change. + *

+ * Note: Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically. + *

+ */ + public synchronized void addSettingsUpdateConsumer(Setting
a, Setting b, BiConsumer consumer) { + if (a != getSetting(a.getKey())) { + throw new IllegalArgumentException("Setting is not registered for key [" + a.getKey() + "]"); + } + if (b != getSetting(b.getKey())) { + throw new IllegalArgumentException("Setting is not registered for key [" + b.getKey() + "]"); + } + this.settingUpdaters.add(Setting.compoundUpdater(consumer, a, b, logger, settings)); + } + + /** + * Adds a settings consumer. + *

+ * Note: Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically. + *

+ */ + public synchronized void addSettingsUpdateConsumer(Setting setting, Consumer consumer) { + addSettingsUpdateConsumer(setting, consumer, (s) -> true); + } + + protected abstract Setting getSetting(String key); + + /** + * Transactional interface to update settings. + * @see Setting + */ + public interface SettingUpdater { + /** + * Prepares applying the given settings to this updater. All the heavy lifting like parsing and validation + * happens in this method. Yet the actual setting should not be changed by this call. + * @param settings the settings to apply + * @return true if this updater will update a setting on calling {@link #apply()} otherwise false + */ + boolean prepareApply(Settings settings); + + /** + * Applies the settings passed to {@link #prepareApply(Settings)} + */ + void apply(); + + /** + * Rolls back to the state before {@link #prepareApply(Settings)} was called. All internal prepared state is cleared after this call. + */ + void rollback(); + } + +} diff --git a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java index ee6371605ee..fb44c7dc9a5 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -229,6 +229,30 @@ public class TimeValue implements Streamable { return Strings.format1Decimals(value, suffix); } + public String getStringRep() { + if (duration < 0) { + return Long.toString(duration); + } + switch (timeUnit) { + case NANOSECONDS: + return Strings.format1Decimals(duration, "nanos"); + case MICROSECONDS: + return Strings.format1Decimals(duration, "micros"); + case MILLISECONDS: + return Strings.format1Decimals(duration, "ms"); + case SECONDS: + return Strings.format1Decimals(duration, "s"); + case MINUTES: + return Strings.format1Decimals(duration, "m"); + case HOURS: + return Strings.format1Decimals(duration, "h"); + case DAYS: + return 
Strings.format1Decimals(duration, "d"); + default: + throw new IllegalArgumentException("unknown time unit: " + timeUnit.name()); + } + } + public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue, String settingName) { settingName = Objects.requireNonNull(settingName); assert settingName.startsWith("index.") == false || MetaDataIndexUpgradeService.INDEX_TIME_SETTINGS.contains(settingName) : settingName; diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index 20f2c96b120..829a173060d 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -23,9 +23,10 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.rest.RestStatus; import java.util.EnumSet; @@ -39,38 +40,38 @@ public class DiscoverySettings extends AbstractComponent { * sets the timeout for a complete publishing cycle, including both sending and committing. the master * will continute to process the next cluster state update after this time has elapsed **/ - public static final String PUBLISH_TIMEOUT = "discovery.zen.publish_timeout"; + public static final Setting PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.Cluster); /** * sets the timeout for receiving enough acks for a specific cluster state and committing it. 
failing * to receive responses within this window will cause the cluster state change to be rejected. */ - public static final String COMMIT_TIMEOUT = "discovery.zen.commit_timeout"; - public static final String NO_MASTER_BLOCK = "discovery.zen.no_master_block"; - public static final String PUBLISH_DIFF_ENABLE = "discovery.zen.publish_diff.enable"; + public static final Setting COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", "_na_", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.Cluster); + public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "_na_", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.Cluster); + public static final Setting PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.Cluster); - public static final TimeValue DEFAULT_PUBLISH_TIMEOUT = TimeValue.timeValueSeconds(30); - public static final TimeValue DEFAULT_COMMIT_TIMEOUT = TimeValue.timeValueSeconds(30); - public static final String DEFAULT_NO_MASTER_BLOCK = "write"; public final static int NO_MASTER_BLOCK_ID = 2; - public final static boolean DEFAULT_PUBLISH_DIFF_ENABLE = true; public final static ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); private volatile ClusterBlock noMasterBlock; private volatile TimeValue publishTimeout; + private volatile TimeValue commitTimeout; private volatile boolean publishDiff; @Inject - public DiscoverySettings(Settings settings, NodeSettingsService nodeSettingsService) { + 
public DiscoverySettings(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - nodeSettingsService.addListener(new ApplySettings()); - this.noMasterBlock = parseNoMasterBlock(settings.get(NO_MASTER_BLOCK, DEFAULT_NO_MASTER_BLOCK)); - this.publishTimeout = settings.getAsTime(PUBLISH_TIMEOUT, DEFAULT_PUBLISH_TIMEOUT); - this.commitTimeout = settings.getAsTime(COMMIT_TIMEOUT, new TimeValue(Math.min(DEFAULT_COMMIT_TIMEOUT.millis(), publishTimeout.millis()))); - this.publishDiff = settings.getAsBoolean(PUBLISH_DIFF_ENABLE, DEFAULT_PUBLISH_DIFF_ENABLE); + clusterSettingsService.addSettingsUpdateConsumer(NO_MASTER_BLOCK_SETTING, this::setNoMasterBlock); + clusterSettingsService.addSettingsUpdateConsumer(PUBLISH_DIFF_ENABLE_SETTING, this::setPublishDiff); + clusterSettingsService.addSettingsUpdateConsumer(COMMIT_TIMEOUT_SETTING, this::setCommitTimeout); + clusterSettingsService.addSettingsUpdateConsumer(PUBLISH_TIMEOUT_SETTING, this::setPublishTimeout); + this.noMasterBlock = NO_MASTER_BLOCK_SETTING.get(settings); + this.publishTimeout = PUBLISH_TIMEOUT_SETTING.get(settings); + this.commitTimeout = COMMIT_TIMEOUT_SETTING.get(settings); + this.publishDiff = PUBLISH_DIFF_ENABLE_SETTING.get(settings); } /** @@ -88,47 +89,25 @@ public class DiscoverySettings extends AbstractComponent { return noMasterBlock; } - public boolean getPublishDiff() { return publishDiff;} - - private class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - TimeValue newPublishTimeout = settings.getAsTime(PUBLISH_TIMEOUT, null); - if (newPublishTimeout != null) { - if (newPublishTimeout.millis() != publishTimeout.millis()) { - logger.info("updating [{}] from [{}] to [{}]", PUBLISH_TIMEOUT, publishTimeout, newPublishTimeout); - publishTimeout = newPublishTimeout; - if (settings.getAsTime(COMMIT_TIMEOUT, null) == null && commitTimeout.millis() > publishTimeout.millis()) { - logger.info("reducing 
default [{}] to [{}] due to publish timeout change", COMMIT_TIMEOUT, publishTimeout); - commitTimeout = publishTimeout; - } - } - } - TimeValue newCommitTimeout = settings.getAsTime(COMMIT_TIMEOUT, null); - if (newCommitTimeout != null) { - if (newCommitTimeout.millis() != commitTimeout.millis()) { - logger.info("updating [{}] from [{}] to [{}]", COMMIT_TIMEOUT, commitTimeout, newCommitTimeout); - commitTimeout = newCommitTimeout; - } - } - String newNoMasterBlockValue = settings.get(NO_MASTER_BLOCK); - if (newNoMasterBlockValue != null) { - ClusterBlock newNoMasterBlock = parseNoMasterBlock(newNoMasterBlockValue); - if (newNoMasterBlock != noMasterBlock) { - noMasterBlock = newNoMasterBlock; - } - } - Boolean newPublishDiff = settings.getAsBoolean(PUBLISH_DIFF_ENABLE, null); - if (newPublishDiff != null) { - if (newPublishDiff != publishDiff) { - logger.info("updating [{}] from [{}] to [{}]", PUBLISH_DIFF_ENABLE, publishDiff, newPublishDiff); - publishDiff = newPublishDiff; - } - } - } + private void setNoMasterBlock(ClusterBlock noMasterBlock) { + this.noMasterBlock = noMasterBlock; } - private ClusterBlock parseNoMasterBlock(String value) { + private void setPublishDiff(boolean publishDiff) { + this.publishDiff = publishDiff; + } + + private void setPublishTimeout(TimeValue publishTimeout) { + this.publishTimeout = publishTimeout; + } + + private void setCommitTimeout(TimeValue commitTimeout) { + this.commitTimeout = commitTimeout; + } + + public boolean getPublishDiff() { return publishDiff;} + + private static ClusterBlock parseNoMasterBlock(String value) { switch (value) { case "all": return NO_MASTER_BLOCK_ALL; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 03111d141ef..7333618aef6 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -39,6 
+39,7 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; @@ -55,7 +56,7 @@ import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; @@ -74,7 +75,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; */ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery, PingContextProvider { - public final static String SETTING_REJOIN_ON_MASTER_GONE = "discovery.zen.rejoin_on_master_gone"; + public final static Setting REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.Cluster); public final static String SETTING_PING_TIMEOUT = "discovery.zen.ping_timeout"; public final static String SETTING_JOIN_TIMEOUT = "discovery.zen.join_timeout"; public final static String SETTING_JOIN_RETRY_ATTEMPTS = "discovery.zen.join_retry_attempts"; @@ -139,7 +140,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen @Inject public ZenDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, - TransportService transportService, final ClusterService clusterService, NodeSettingsService nodeSettingsService, + TransportService transportService, final ClusterService clusterService, 
ClusterSettingsService clusterSettingsService, ZenPingService pingService, ElectMasterService electMasterService, DiscoverySettings discoverySettings) { super(settings); @@ -160,7 +161,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen this.masterElectionFilterClientNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_CLIENT, true); this.masterElectionFilterDataNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_DATA, false); this.masterElectionWaitForJoinsTimeout = settings.getAsTime(SETTING_MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT, TimeValue.timeValueMillis(joinTimeout.millis() / 2)); - this.rejoinOnMasterGone = settings.getAsBoolean(SETTING_REJOIN_ON_MASTER_GONE, true); + this.rejoinOnMasterGone = REJOIN_ON_MASTER_GONE_SETTING.get(settings); if (this.joinRetryAttempts < 1) { throw new IllegalArgumentException("'" + SETTING_JOIN_RETRY_ATTEMPTS + "' must be a positive number. got [" + SETTING_JOIN_RETRY_ATTEMPTS + "]"); @@ -171,7 +172,15 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettingsService.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> { + final ClusterState clusterState = clusterService.state(); + int masterNodes = clusterState.nodes().masterNodes().size(); + if (value > masterNodes) { + throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]"); + } + return true; + }); + clusterSettingsService.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoingOnMasterGone); 
this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterName, clusterService); this.masterFD.addListener(new MasterNodeFailureListener()); @@ -306,6 +315,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen return clusterJoinsCounter.get() > 0; } + private void setRejoingOnMasterGone(boolean rejoin) { + this.rejoinOnMasterGone = rejoin; + } + /** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */ @@ -1139,26 +1152,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - int minimumMasterNodes = settings.getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, - ZenDiscovery.this.electMaster.minimumMasterNodes()); - if (minimumMasterNodes != ZenDiscovery.this.electMaster.minimumMasterNodes()) { - logger.info("updating {} from [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, - ZenDiscovery.this.electMaster.minimumMasterNodes(), minimumMasterNodes); - handleMinimumMasterNodesChanged(minimumMasterNodes); - } - - boolean rejoinOnMasterGone = settings.getAsBoolean(SETTING_REJOIN_ON_MASTER_GONE, ZenDiscovery.this.rejoinOnMasterGone); - if (rejoinOnMasterGone != ZenDiscovery.this.rejoinOnMasterGone) { - logger.info("updating {} from [{}] to [{}]", SETTING_REJOIN_ON_MASTER_GONE, ZenDiscovery.this.rejoinOnMasterGone, rejoinOnMasterGone); - ZenDiscovery.this.rejoinOnMasterGone = rejoinOnMasterGone; - } - } - } - - /** * All control of the join thread should happen under the cluster state update task thread. 
* This is important to make sure that the background joining process is always in sync with any cluster state updates diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java index 9164a85388a..3ba338b4070 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java @@ -22,11 +22,10 @@ package org.elasticsearch.discovery.zen.elect; import com.carrotsearch.hppc.ObjectContainer; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -41,23 +40,7 @@ import java.util.List; */ public class ElectMasterService extends AbstractComponent { - public static final String DISCOVERY_ZEN_MINIMUM_MASTER_NODES = "discovery.zen.minimum_master_nodes"; - public static final Validator DISCOVERY_ZEN_MINIMUM_MASTER_NODES_VALIDATOR = new Validator() { - @Override - public String validate(String setting, String value, ClusterState clusterState) { - int intValue; - try { - intValue = Integer.parseInt(value); - } catch (NumberFormatException ex) { - return "cannot parse value [" + value + "] as an integer"; - } - int masterNodes = clusterState.nodes().masterNodes().size(); - if (intValue > masterNodes) { - return "cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES + " to more than the current master nodes count [" + masterNodes + "]"; - } - return null; - } - }; + public static final Setting 
DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, true, Setting.Scope.Cluster); // This is the minimum version a master needs to be on, otherwise it gets ignored // This is based on the minimum compatible version of the current version this node is on @@ -70,7 +53,7 @@ public class ElectMasterService extends AbstractComponent { public ElectMasterService(Settings settings, Version version) { super(settings); this.minMasterVersion = version.minimumCompatibilityVersion(); - this.minimumMasterNodes = settings.getAsInt(DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1); + this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings); logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes); } diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java index e83ec695a96..5e410fb6d53 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -227,7 +227,7 @@ public class GatewayService extends AbstractLifecycleComponent i // automatically generate a UID for the metadata if we need to metaDataBuilder.generateClusterUuidIfNeeded(); - if (recoveredState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || currentState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false)) { + if (MetaData.SETTING_READ_ONLY_SETTING.get(recoveredState.metaData().settings()) || MetaData.SETTING_READ_ONLY_SETTING.get(currentState.metaData().settings())) { blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); } diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java index 1bd023abdb0..48f24613f6f 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java +++ 
b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java @@ -21,37 +21,36 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.StoreRateLimiting; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.node.settings.NodeSettingsService; /** * IndexStoreConfig encapsulates node / cluster level configuration for index level {@link IndexStore} instances. * For instance it maintains the node level rate limiter configuration: updates to the cluster that disable or enable - * {@value #INDICES_STORE_THROTTLE_TYPE} or {@value #INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC} are reflected immediately + * indices.store.throttle.type or indices.store.throttle.max_bytes_per_sec are reflected immediately * on all referencing {@link IndexStore} instances */ -public class IndexStoreConfig implements NodeSettingsService.Listener { +public class IndexStoreConfig{ /** * Configures the node / cluster level throttle type. See {@link StoreRateLimiting.Type}. */ - public static final String INDICES_STORE_THROTTLE_TYPE = "indices.store.throttle.type"; + public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", "_na_", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, true, Setting.Scope.Cluster); /** * Configures the node / cluster level throttle intensity. 
The default is 10240 MB */ - public static final String INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC = "indices.store.throttle.max_bytes_per_sec"; - private volatile String rateLimitingType; + public static final Setting INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.Cluster); + private volatile StoreRateLimiting.Type rateLimitingType; private volatile ByteSizeValue rateLimitingThrottle; private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); private final ESLogger logger; public IndexStoreConfig(Settings settings) { logger = Loggers.getLogger(IndexStoreConfig.class, settings); // we don't limit by default (we default to CMS's auto throttle instead): - this.rateLimitingType = settings.get("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name()); + this.rateLimitingType = INDICES_STORE_THROTTLE_TYPE_SETTING.get(settings); rateLimiting.setType(rateLimitingType); - this.rateLimitingThrottle = settings.getAsBytesSize("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0)); + this.rateLimitingThrottle = INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.get(settings); rateLimiting.setMaxRate(rateLimitingThrottle); logger.debug("using indices.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle); } @@ -63,22 +62,12 @@ public class IndexStoreConfig implements NodeSettingsService.Listener { return rateLimiting; } - @Override - public void onRefreshSettings(Settings settings) { - String rateLimitingType = settings.get(INDICES_STORE_THROTTLE_TYPE, this.rateLimitingType); - // try and parse the type - StoreRateLimiting.Type.fromString(rateLimitingType); - if (!rateLimitingType.equals(this.rateLimitingType)) { - logger.info("updating indices.store.throttle.type from [{}] to [{}]", this.rateLimitingType, rateLimitingType); - this.rateLimitingType = rateLimitingType; - 
this.rateLimiting.setType(rateLimitingType); - } + public void setRateLimitingType(StoreRateLimiting.Type rateLimitingType) { + this.rateLimitingType = rateLimitingType; + rateLimiting.setType(rateLimitingType); + } - ByteSizeValue rateLimitingThrottle = settings.getAsBytesSize(INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, this.rateLimitingThrottle); - if (!rateLimitingThrottle.equals(this.rateLimitingThrottle)) { - logger.info("updating indices.store.throttle.max_bytes_per_sec from [{}] to [{}], note, type is [{}]", this.rateLimitingThrottle, rateLimitingThrottle, this.rateLimitingType); - this.rateLimitingThrottle = rateLimitingThrottle; - this.rateLimiting.setMaxRate(rateLimitingThrottle); - } + public void setRateLimitingThrottle(ByteSizeValue rateLimitingThrottle) { + this.rateLimitingThrottle = rateLimitingThrottle; } } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index dead72aee8b..ad98e3e9a8f 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -58,7 +58,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.plugins.PluginsService; import java.io.IOException; @@ -100,9 +100,9 @@ public class IndicesService extends AbstractLifecycleComponent i @Inject public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv, - NodeSettingsService nodeSettingsService, AnalysisRegistry analysisRegistry, - IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver, - ClusterService 
clusterService, MapperRegistry mapperRegistry) { + ClusterSettingsService clusterSettingsService, AnalysisRegistry analysisRegistry, + IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService, MapperRegistry mapperRegistry) { super(settings); this.pluginsService = pluginsService; this.nodeEnv = nodeEnv; @@ -113,7 +113,9 @@ public class IndicesService extends AbstractLifecycleComponent i this.clusterService = clusterService; this.indexNameExpressionResolver = indexNameExpressionResolver; this.mapperRegistry = mapperRegistry; - nodeSettingsService.addListener(indexStoreConfig); + clusterSettingsService.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType); + clusterSettingsService.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle); + } @Override diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 33f3c127d67..e3837fb391c 100644 --- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -25,9 +25,10 @@ import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.ArrayList; import java.util.List; @@ -45,23 +46,15 @@ 
public class HierarchyCircuitBreakerService extends CircuitBreakerService { private final ConcurrentMap breakers = new ConcurrentHashMap(); - // Old pre-1.4.0 backwards compatible settings - public static final String OLD_CIRCUIT_BREAKER_MAX_BYTES_SETTING = "indices.fielddata.breaker.limit"; - public static final String OLD_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.fielddata.breaker.overhead"; + public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.total.limit", "70%", true, Setting.Scope.Cluster); - public static final String TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.total.limit"; - public static final String DEFAULT_TOTAL_CIRCUIT_BREAKER_LIMIT = "70%"; - - public static final String FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.fielddata.limit"; - public static final String FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.breaker.fielddata.overhead"; + public static final Setting FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", true, Setting.Scope.Cluster); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.nonNegativeDouble("indices.breaker.fielddata.overhead", 1.03d, true, Setting.Scope.Cluster); public static final String FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.fielddata.type"; - public static final String DEFAULT_FIELDDATA_BREAKER_LIMIT = "60%"; - public static final double DEFAULT_FIELDDATA_OVERHEAD_CONSTANT = 1.03; - public static final String REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.request.limit"; - public static final String REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.breaker.request.overhead"; + public static final Setting REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.request.limit", "40%", true, Setting.Scope.Cluster); + public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = 
Setting.nonNegativeDouble("indices.breaker.request.overhead", 1.0d, true, Setting.Scope.Cluster); public static final String REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.request.type"; - public static final String DEFAULT_REQUEST_BREAKER_LIMIT = "40%"; public static final String DEFAULT_BREAKER_TYPE = "memory"; @@ -73,41 +66,21 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { private final AtomicLong parentTripCount = new AtomicLong(0); @Inject - public HierarchyCircuitBreakerService(Settings settings, NodeSettingsService nodeSettingsService) { + public HierarchyCircuitBreakerService(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - - // This uses the old InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING - // setting to keep backwards compatibility with 1.3, it can be safely - // removed when compatibility with 1.3 is no longer needed - String compatibilityFielddataLimitDefault = DEFAULT_FIELDDATA_BREAKER_LIMIT; - ByteSizeValue compatibilityFielddataLimit = settings.getAsMemory(OLD_CIRCUIT_BREAKER_MAX_BYTES_SETTING, null); - if (compatibilityFielddataLimit != null) { - compatibilityFielddataLimitDefault = compatibilityFielddataLimit.toString(); - } - - // This uses the old InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING - // setting to keep backwards compatibility with 1.3, it can be safely - // removed when compatibility with 1.3 is no longer needed - double compatibilityFielddataOverheadDefault = DEFAULT_FIELDDATA_OVERHEAD_CONSTANT; - Double compatibilityFielddataOverhead = settings.getAsDouble(OLD_CIRCUIT_BREAKER_OVERHEAD_SETTING, null); - if (compatibilityFielddataOverhead != null) { - compatibilityFielddataOverheadDefault = compatibilityFielddataOverhead; - } - this.fielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, - settings.getAsMemory(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, compatibilityFielddataLimitDefault).bytes(), - 
settings.getAsDouble(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, compatibilityFielddataOverheadDefault), + FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), + FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings), CircuitBreaker.Type.parseValue(settings.get(FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE)) ); this.requestSettings = new BreakerSettings(CircuitBreaker.REQUEST, - settings.getAsMemory(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, DEFAULT_REQUEST_BREAKER_LIMIT).bytes(), - settings.getAsDouble(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0), + REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), + REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings), CircuitBreaker.Type.parseValue(settings.get(REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE)) ); - this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT, - settings.getAsMemory(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, DEFAULT_TOTAL_CIRCUIT_BREAKER_LIMIT).bytes(), 1.0, CircuitBreaker.Type.PARENT); + this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT, TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), 1.0, CircuitBreaker.Type.PARENT); if (logger.isTraceEnabled()) { logger.trace("parent circuit breaker with settings {}", this.parentSettings); } @@ -115,52 +88,41 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { registerBreaker(this.requestSettings); registerBreaker(this.fielddataSettings); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettingsService.addSettingsUpdateConsumer(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, this::setTotalCircuitBreakerLimit, this::validateTotalCircuitBreakerLimit); + clusterSettingsService.addSettingsUpdateConsumer(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, this::setFieldDataBreakerLimit); + clusterSettingsService.addSettingsUpdateConsumer(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, 
this::setRequestBreakerLimit); + } + private void setRequestBreakerLimit(ByteSizeValue newRequestMax, Double newRequestOverhead) { + long newRequestLimitBytes = newRequestMax == null ? HierarchyCircuitBreakerService.this.requestSettings.getLimit() : newRequestMax.bytes(); + newRequestOverhead = newRequestOverhead == null ? HierarchyCircuitBreakerService.this.requestSettings.getOverhead() : newRequestOverhead; + + BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestLimitBytes, newRequestOverhead, + HierarchyCircuitBreakerService.this.requestSettings.getType()); + registerBreaker(newRequestSettings); + HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings; + logger.info("Updated breaker settings request: {}", newRequestSettings); } - public class ApplySettings implements NodeSettingsService.Listener { + private void setFieldDataBreakerLimit(ByteSizeValue newFielddataMax, Double newFielddataOverhead) { + long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.bytes(); + newFielddataOverhead = newFielddataOverhead == null ? 
HierarchyCircuitBreakerService.this.fielddataSettings.getOverhead() : newFielddataOverhead; + BreakerSettings newFielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, newFielddataLimitBytes, newFielddataOverhead, + HierarchyCircuitBreakerService.this.fielddataSettings.getType()); + registerBreaker(newFielddataSettings); + HierarchyCircuitBreakerService.this.fielddataSettings = newFielddataSettings; + logger.info("Updated breaker settings field data: {}", newFielddataSettings); - @Override - public void onRefreshSettings(Settings settings) { + } - // Fielddata settings - ByteSizeValue newFielddataMax = settings.getAsMemory(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, null); - Double newFielddataOverhead = settings.getAsDouble(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, null); - if (newFielddataMax != null || newFielddataOverhead != null) { - long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.bytes(); - newFielddataOverhead = newFielddataOverhead == null ? 
HierarchyCircuitBreakerService.this.fielddataSettings.getOverhead() : newFielddataOverhead; + private boolean validateTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) { + BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.bytes(), 1.0, CircuitBreaker.Type.PARENT); + validateSettings(new BreakerSettings[]{newParentSettings}); + return true; + } - BreakerSettings newFielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, newFielddataLimitBytes, newFielddataOverhead, - HierarchyCircuitBreakerService.this.fielddataSettings.getType()); - registerBreaker(newFielddataSettings); - HierarchyCircuitBreakerService.this.fielddataSettings = newFielddataSettings; - logger.info("Updated breaker settings fielddata: {}", newFielddataSettings); - } - - // Request settings - ByteSizeValue newRequestMax = settings.getAsMemory(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, null); - Double newRequestOverhead = settings.getAsDouble(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, null); - if (newRequestMax != null || newRequestOverhead != null) { - long newRequestLimitBytes = newRequestMax == null ? HierarchyCircuitBreakerService.this.requestSettings.getLimit() : newRequestMax.bytes(); - newRequestOverhead = newRequestOverhead == null ? 
HierarchyCircuitBreakerService.this.requestSettings.getOverhead() : newRequestOverhead; - - BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestLimitBytes, newRequestOverhead, - HierarchyCircuitBreakerService.this.requestSettings.getType()); - registerBreaker(newRequestSettings); - HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings; - logger.info("Updated breaker settings request: {}", newRequestSettings); - } - - // Parent settings - long oldParentMax = HierarchyCircuitBreakerService.this.parentSettings.getLimit(); - ByteSizeValue newParentMax = settings.getAsMemory(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, null); - if (newParentMax != null && (newParentMax.bytes() != oldParentMax)) { - BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, newParentMax.bytes(), 1.0, CircuitBreaker.Type.PARENT); - validateSettings(new BreakerSettings[]{newParentSettings}); - HierarchyCircuitBreakerService.this.parentSettings = newParentSettings; - logger.info("Updated breaker settings parent: {}", newParentSettings); - } - } + private void setTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) { + BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.bytes(), 1.0, CircuitBreaker.Type.PARENT); + this.parentSettings = newParentSettings; } /** diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 749ba4f3360..eb1c643038f 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -23,16 +23,16 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import 
org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; -import java.util.Objects; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -40,38 +40,37 @@ import java.util.concurrent.TimeUnit; */ public class RecoverySettings extends AbstractComponent implements Closeable { - public static final String INDICES_RECOVERY_FILE_CHUNK_SIZE = "indices.recovery.file_chunk_size"; - public static final String INDICES_RECOVERY_TRANSLOG_OPS = "indices.recovery.translog_ops"; - public static final String INDICES_RECOVERY_TRANSLOG_SIZE = "indices.recovery.translog_size"; - public static final String INDICES_RECOVERY_COMPRESS = "indices.recovery.compress"; - public static final String INDICES_RECOVERY_CONCURRENT_STREAMS = "indices.recovery.concurrent_streams"; - public static final String INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS = "indices.recovery.concurrent_small_file_streams"; - public static final String INDICES_RECOVERY_MAX_BYTES_PER_SEC = "indices.recovery.max_bytes_per_sec"; + public static final Setting INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING = Setting.byteSizeSetting("indices.recovery.file_chunk_size", new ByteSizeValue(512, ByteSizeUnit.KB), true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_TRANSLOG_OPS_SETTING = Setting.intSetting("indices.recovery.translog_ops", 1000, true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_TRANSLOG_SIZE_SETTING = Setting.byteSizeSetting("indices.recovery.translog_size", new ByteSizeValue(512, 
ByteSizeUnit.KB), true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_COMPRESS_SETTING = Setting.boolSetting("indices.recovery.compress", true, true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_streams", 3, true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_small_file_streams", 2, true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), true, Setting.Scope.Cluster); /** * how long to wait before retrying after issues cause by cluster state syncing between nodes * i.e., local node is not yet known on remote node, remote shard not yet started etc. */ - public static final String INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC = "indices.recovery.retry_delay_state_sync"; + public static final Setting INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), true, Setting.Scope.Cluster); /** how long to wait before retrying after network related issues */ - public static final String INDICES_RECOVERY_RETRY_DELAY_NETWORK = "indices.recovery.retry_delay_network"; - - /** - * recoveries that don't show any activity for more then this interval will be failed. 
- * defaults to `indices.recovery.internal_action_long_timeout` - */ - public static final String INDICES_RECOVERY_ACTIVITY_TIMEOUT = "indices.recovery.recovery_activity_timeout"; + public static final Setting INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), true, Setting.Scope.Cluster); /** timeout value to use for requests made as part of the recovery process */ - public static final String INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT = "indices.recovery.internal_action_timeout"; + public static final Setting INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), true, Setting.Scope.Cluster); /** * timeout value to use for requests made as part of the recovery process that are expected to take long time. * defaults to twice `indices.recovery.internal_action_timeout`. */ - public static final String INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT = "indices.recovery.internal_action_long_timeout"; + public static final Setting INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.internal_action_long_timeout", (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), TimeValue.timeValueSeconds(0), true, Setting.Scope.Cluster); + /** + * recoveries that don't show any activity for more than this interval will be failed. 
+ * defaults to `indices.recovery.internal_action_long_timeout` + */ + public static final Setting INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.recovery_activity_timeout", (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0), true, Setting.Scope.Cluster); public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb", "SMALL_FILE_CUTOFF_BYTES").bytes(); @@ -96,36 +95,32 @@ public class RecoverySettings extends AbstractComponent implements Closeable { @Inject - public RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) { + public RecoverySettings(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); + this.fileChunkSize = INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING.get(settings); + this.translogOps = INDICES_RECOVERY_TRANSLOG_OPS_SETTING.get(settings); + this.translogSize = INDICES_RECOVERY_TRANSLOG_SIZE_SETTING.get(settings); + this.compress = INDICES_RECOVERY_COMPRESS_SETTING.get(settings); - this.fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, new ByteSizeValue(512, ByteSizeUnit.KB)); - this.translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, 1000); - this.translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, new ByteSizeValue(512, ByteSizeUnit.KB)); - this.compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, true); - - this.retryDelayStateSync = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(500)); + this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings); // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes) // and we want to give the master time to remove a faulty node - this.retryDelayNetwork = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_NETWORK, TimeValue.timeValueSeconds(5)); + 
this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings); - this.internalActionTimeout = settings.getAsTime(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, TimeValue.timeValueMinutes(15)); - this.internalActionLongTimeout = settings.getAsTime(INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, new TimeValue(internalActionTimeout.millis() * 2)); + this.internalActionTimeout = INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(settings); + this.internalActionLongTimeout = INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.get(settings); - this.activityTimeout = settings.getAsTime(INDICES_RECOVERY_ACTIVITY_TIMEOUT, - // default to the internalActionLongTimeout used as timeouts on RecoverySource - internalActionLongTimeout - ); + this.activityTimeout = INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.get(settings); - this.concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, 3); + this.concurrentStreams = INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.get(settings); this.concurrentStreamPool = EsExecutors.newScaling("recovery_stream", 0, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[recovery_stream]")); - this.concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 2); + this.concurrentSmallFileStreams = INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.get(settings); this.concurrentSmallFileStreamPool = EsExecutors.newScaling("small_file_recovery_stream", 0, concurrentSmallFileStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[small_file_recovery_stream]")); - this.maxBytesPerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(40, ByteSizeUnit.MB)); + this.maxBytesPerSec = INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.get(settings); if (maxBytesPerSec.bytes() <= 0) { rateLimiter = null; } else { @@ -135,7 +130,18 @@ public class RecoverySettings extends AbstractComponent implements Closeable { 
logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}], file_chunk_size [{}], translog_size [{}], translog_ops [{}], and compress [{}]", maxBytesPerSec, concurrentStreams, fileChunkSize, translogSize, translogOps, compress); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING, this::setFileChunkSize); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_TRANSLOG_OPS_SETTING, this::setTranslogOps); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_TRANSLOG_SIZE_SETTING, this::setTranslogSize); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_COMPRESS_SETTING, this::setCompress); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING, this::setConcurrentStreams); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING, this::setConcurrentSmallFileStreams); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, this::setInternalActionLongTimeout); + clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout); } @Override @@ -196,75 +202,60 @@ public class RecoverySettings extends AbstractComponent implements Closeable { return internalActionLongTimeout; } + private void setFileChunkSize(ByteSizeValue 
fileChunkSize) { + this.fileChunkSize = fileChunkSize; + } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - ByteSizeValue maxSizePerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec); - if (!Objects.equals(maxSizePerSec, RecoverySettings.this.maxBytesPerSec)) { - logger.info("updating [{}] from [{}] to [{}]", INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec, maxSizePerSec); - RecoverySettings.this.maxBytesPerSec = maxSizePerSec; - if (maxSizePerSec.bytes() <= 0) { - rateLimiter = null; - } else if (rateLimiter != null) { - rateLimiter.setMBPerSec(maxSizePerSec.mbFrac()); - } else { - rateLimiter = new SimpleRateLimiter(maxSizePerSec.mbFrac()); - } - } + private void setCompress(boolean compress) { + this.compress = compress; + } - ByteSizeValue fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, RecoverySettings.this.fileChunkSize); - if (!fileChunkSize.equals(RecoverySettings.this.fileChunkSize)) { - logger.info("updating [indices.recovery.file_chunk_size] from [{}] to [{}]", RecoverySettings.this.fileChunkSize, fileChunkSize); - RecoverySettings.this.fileChunkSize = fileChunkSize; - } + private void setTranslogOps(int translogOps) { + this.translogOps = translogOps; + } - int translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, RecoverySettings.this.translogOps); - if (translogOps != RecoverySettings.this.translogOps) { - logger.info("updating [indices.recovery.translog_ops] from [{}] to [{}]", RecoverySettings.this.translogOps, translogOps); - RecoverySettings.this.translogOps = translogOps; - } + private void setTranslogSize(ByteSizeValue translogSize) { + this.translogSize = translogSize; + } - ByteSizeValue translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, RecoverySettings.this.translogSize); - if 
(!translogSize.equals(RecoverySettings.this.translogSize)) { - logger.info("updating [indices.recovery.translog_size] from [{}] to [{}]", RecoverySettings.this.translogSize, translogSize); - RecoverySettings.this.translogSize = translogSize; - } + private void setConcurrentStreams(int concurrentStreams) { + this.concurrentStreams = concurrentStreams; + concurrentStreamPool.setMaximumPoolSize(concurrentStreams); + } - boolean compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, RecoverySettings.this.compress); - if (compress != RecoverySettings.this.compress) { - logger.info("updating [indices.recovery.compress] from [{}] to [{}]", RecoverySettings.this.compress, compress); - RecoverySettings.this.compress = compress; - } + public void setRetryDelayStateSync(TimeValue retryDelayStateSync) { + this.retryDelayStateSync = retryDelayStateSync; + } - int concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, RecoverySettings.this.concurrentStreams); - if (concurrentStreams != RecoverySettings.this.concurrentStreams) { - logger.info("updating [indices.recovery.concurrent_streams] from [{}] to [{}]", RecoverySettings.this.concurrentStreams, concurrentStreams); - RecoverySettings.this.concurrentStreams = concurrentStreams; - RecoverySettings.this.concurrentStreamPool.setMaximumPoolSize(concurrentStreams); - } + public void setRetryDelayNetwork(TimeValue retryDelayNetwork) { + this.retryDelayNetwork = retryDelayNetwork; + } - int concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RecoverySettings.this.concurrentSmallFileStreams); - if (concurrentSmallFileStreams != RecoverySettings.this.concurrentSmallFileStreams) { - logger.info("updating [indices.recovery.concurrent_small_file_streams] from [{}] to [{}]", RecoverySettings.this.concurrentSmallFileStreams, concurrentSmallFileStreams); - RecoverySettings.this.concurrentSmallFileStreams = concurrentSmallFileStreams; - 
RecoverySettings.this.concurrentSmallFileStreamPool.setMaximumPoolSize(concurrentSmallFileStreams); - } + public void setActivityTimeout(TimeValue activityTimeout) { + this.activityTimeout = activityTimeout; + } - RecoverySettings.this.retryDelayNetwork = maybeUpdate(RecoverySettings.this.retryDelayNetwork, settings, INDICES_RECOVERY_RETRY_DELAY_NETWORK); - RecoverySettings.this.retryDelayStateSync = maybeUpdate(RecoverySettings.this.retryDelayStateSync, settings, INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC); - RecoverySettings.this.activityTimeout = maybeUpdate(RecoverySettings.this.activityTimeout, settings, INDICES_RECOVERY_ACTIVITY_TIMEOUT); - RecoverySettings.this.internalActionTimeout = maybeUpdate(RecoverySettings.this.internalActionTimeout, settings, INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT); - RecoverySettings.this.internalActionLongTimeout = maybeUpdate(RecoverySettings.this.internalActionLongTimeout, settings, INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT); - } + public void setInternalActionTimeout(TimeValue internalActionTimeout) { + this.internalActionTimeout = internalActionTimeout; + } - private TimeValue maybeUpdate(final TimeValue currentValue, final Settings settings, final String key) { - final TimeValue value = settings.getAsTime(key, currentValue); - if (value.equals(currentValue)) { - return currentValue; - } - logger.info("updating [] from [{}] to [{}]", key, currentValue, value); - return value; + public void setInternalActionLongTimeout(TimeValue internalActionLongTimeout) { + this.internalActionLongTimeout = internalActionLongTimeout; + } + + private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) { + this.maxBytesPerSec = maxBytesPerSec; + if (maxBytesPerSec.bytes() <= 0) { + rateLimiter = null; + } else if (rateLimiter != null) { + rateLimiter.setMBPerSec(maxBytesPerSec.mbFrac()); + } else { + rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac()); } } + + private void setConcurrentSmallFileStreams(int concurrentSmallFileStreams) 
{ + this.concurrentSmallFileStreams = concurrentSmallFileStreams; + concurrentSmallFileStreamPool.setMaximumPoolSize(concurrentSmallFileStreams); + } } diff --git a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java index f095cc355ef..47d18105d0f 100644 --- a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java +++ b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -49,7 +50,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import java.io.IOException; import java.util.ArrayList; @@ -66,7 +67,7 @@ import java.util.concurrent.locks.ReentrantLock; */ public class IndicesTTLService extends AbstractLifecycleComponent { - public static final String INDICES_TTL_INTERVAL = "indices.ttl.interval"; + public static final Setting INDICES_TTL_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.Cluster); public static final String INDEX_TTL_DISABLE_PURGE = "index.ttl.disable_purge"; private final ClusterService clusterService; @@ -77,16 +78,15 @@ public class IndicesTTLService extends AbstractLifecycleComponent pageCacheRecyclerImpl = 
PageCacheRecycler.class; Class bigArraysImpl = BigArrays.class; - public NodeModule(Node node, NodeSettingsService nodeSettingsService, MonitorService monitorService) { + public NodeModule(Node node, MonitorService monitorService) { this.node = node; - this.nodeSettingsService = nodeSettingsService; this.monitorService = monitorService; } @@ -60,7 +57,6 @@ public class NodeModule extends AbstractModule { } bind(Node.class).toInstance(node); - bind(NodeSettingsService.class).toInstance(nodeSettingsService); bind(MonitorService.class).toInstance(monitorService); bind(NodeService.class).asEagerSingleton(); } diff --git a/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java b/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java deleted file mode 100644 index dbe6a33172b..00000000000 --- a/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.node.settings; - -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; - -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; - -/** - * A service that allows to register for node settings change that can come from cluster - * events holding new settings. - */ -public class NodeSettingsService extends AbstractComponent implements ClusterStateListener { - - private static volatile Settings globalSettings = Settings.Builder.EMPTY_SETTINGS; - - /** - * Returns the global (static) settings last updated by a node. Note, if you have multiple - * nodes on the same JVM, it will just return the latest one set... - */ - public static Settings getGlobalSettings() { - return globalSettings; - } - - private volatile Settings lastSettingsApplied; - - private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); - - @Inject - public NodeSettingsService(Settings settings) { - super(settings); - globalSettings = settings; - } - - // inject it as a member, so we won't get into possible cyclic problems - public void setClusterService(ClusterService clusterService) { - clusterService.add(this); - } - - @Override - public void clusterChanged(ClusterChangedEvent event) { - // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency - if (event.state().blocks().disableStatePersistence()) { - return; - } - - if (!event.metaDataChanged()) { - // nothing changed in the metadata, no need to check - return; - } - - if (lastSettingsApplied != null && event.state().metaData().settings().equals(lastSettingsApplied)) { - // nothing changed in the 
settings, ignore - return; - } - - for (Listener listener : listeners) { - try { - listener.onRefreshSettings(event.state().metaData().settings()); - } catch (Exception e) { - logger.warn("failed to refresh settings for [{}]", e, listener); - } - } - - try { - for (Map.Entry entry : event.state().metaData().settings().getAsMap().entrySet()) { - if (entry.getKey().startsWith("logger.")) { - String component = entry.getKey().substring("logger.".length()); - if ("_root".equals(component)) { - ESLoggerFactory.getRootLogger().setLevel(entry.getValue()); - } else { - ESLoggerFactory.getLogger(component).setLevel(entry.getValue()); - } - } - } - } catch (Exception e) { - logger.warn("failed to refresh settings for [{}]", e, "logger"); - } - - lastSettingsApplied = event.state().metaData().settings(); - globalSettings = lastSettingsApplied; - } - - /** - * Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically. - */ - public void addListener(Listener listener) { - this.listeners.add(listener); - } - - public void removeListener(Listener listener) { - this.listeners.remove(listener); - } - - public interface Listener { - void onRefreshSettings(Settings settings); - } -} diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java index 005b30e6207..bd7e62abf48 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java @@ -88,6 +88,6 @@ public class RestUpdateSettingsAction extends BaseRestHandler { } updateSettingsRequest.settings(updateSettings); - client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener(channel)); + 
client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 9501099997f..bfca6bb322f 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -70,7 +71,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.IndicesWarmer.TerminationHandle; import org.elasticsearch.indices.cache.request.IndicesRequestCache; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; @@ -109,9 +110,10 @@ public class SearchService extends AbstractLifecycleComponent imp public static final String NORMS_LOADING_KEY = "index.norms.loading"; public static final String DEFAULT_KEEPALIVE_KEY = "search.default_keep_alive"; public static final String KEEPALIVE_INTERVAL_KEY = "search.keep_alive_interval"; - public static final String DEFAULT_SEARCH_TIMEOUT = "search.default_search_timeout"; public static final TimeValue NO_TIMEOUT = timeValueMillis(-1); + public static final Setting DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, 
Setting.Scope.Cluster); + private final ThreadPool threadPool; @@ -150,7 +152,7 @@ public class SearchService extends AbstractLifecycleComponent imp private final ParseFieldMatcher parseFieldMatcher; @Inject - public SearchService(Settings settings, NodeSettingsService nodeSettingsService, ClusterService clusterService, IndicesService indicesService,IndicesWarmer indicesWarmer, ThreadPool threadPool, + public SearchService(Settings settings, ClusterSettingsService clusterSettingsService, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) { super(settings); @@ -184,19 +186,12 @@ public class SearchService extends AbstractLifecycleComponent imp this.indicesWarmer.addListener(new FieldDataWarmer(indicesWarmer)); this.indicesWarmer.addListener(new SearchWarmer()); - defaultSearchTimeout = settings.getAsTime(DEFAULT_SEARCH_TIMEOUT, NO_TIMEOUT); - nodeSettingsService.addListener(new SearchSettingsListener()); + defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings); + clusterSettingsService.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout); } - class SearchSettingsListener implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - final TimeValue maybeNewDefaultSearchTimeout = settings.getAsTime(SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout); - if (!maybeNewDefaultSearchTimeout.equals(SearchService.this.defaultSearchTimeout)) { - logger.info("updating [{}] from [{}] to [{}]", SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout, maybeNewDefaultSearchTimeout); - SearchService.this.defaultSearchTimeout = maybeNewDefaultSearchTimeout; - } - } + private void 
setDefaultSearchTimeout(TimeValue defaultSearchTimeout) { + this.defaultSearchTimeout = defaultSearchTimeout; } @Override diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index cd710d52cdc..c4049573b9b 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -33,8 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; -import org.elasticsearch.cluster.settings.DynamicSettings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -50,6 +49,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRepository; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.threadpool.ThreadPool; @@ -118,18 +118,19 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis private final MetaDataCreateIndexService createIndexService; - private final DynamicSettings dynamicSettings; + private final ClusterSettings dynamicSettings; private final MetaDataIndexUpgradeService metaDataIndexUpgradeService; private final CopyOnWriteArrayList> listeners = new CopyOnWriteArrayList<>(); private final BlockingQueue updatedSnapshotStateQueue = 
ConcurrentCollections.newBlockingQueue(); + private final ClusterSettingsService clusterSettingsService; @Inject public RestoreService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, TransportService transportService, - AllocationService allocationService, MetaDataCreateIndexService createIndexService, @ClusterDynamicSettings DynamicSettings dynamicSettings, - MetaDataIndexUpgradeService metaDataIndexUpgradeService) { + AllocationService allocationService, MetaDataCreateIndexService createIndexService, ClusterSettings dynamicSettings, + MetaDataIndexUpgradeService metaDataIndexUpgradeService, ClusterSettingsService clusterSettingsService) { super(settings); this.clusterService = clusterService; this.repositoriesService = repositoriesService; @@ -140,6 +141,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis this.metaDataIndexUpgradeService = metaDataIndexUpgradeService; transportService.registerRequestHandler(UPDATE_RESTORE_ACTION_NAME, UpdateIndexShardRestoreStatusRequest::new, ThreadPool.Names.SAME, new UpdateRestoreStateRequestHandler()); clusterService.add(this); + this.clusterSettingsService = clusterSettingsService; } /** @@ -389,24 +391,9 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) { if (request.includeGlobalState()) { if (metaData.persistentSettings() != null) { - boolean changed = false; - Settings.Builder persistentSettings = Settings.settingsBuilder().put(); - for (Map.Entry entry : metaData.persistentSettings().getAsMap().entrySet()) { - if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) { - String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state()); - if (error == null) { - persistentSettings.put(entry.getKey(), entry.getValue()); - changed = true; - } else { - logger.warn("ignoring persistent setting 
[{}], [{}]", entry.getKey(), error); - } - } else { - logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey()); - } - } - if (changed) { - mdBuilder.persistentSettings(persistentSettings.build()); - } + Settings settings = metaData.persistentSettings(); + clusterSettingsService.dryRun(settings); + mdBuilder.persistentSettings(settings); } if (metaData.templates() != null) { // TODO: Should all existing templates be deleted first? diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index b0d81279b03..f0fc5d86b00 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -20,13 +20,12 @@ package org.elasticsearch.threadpool; import org.apache.lucene.util.Counter; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.SizeValue; @@ -38,14 +37,12 @@ import org.elasticsearch.common.util.concurrent.XRejectedExecutionHandler; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import java.io.IOException; import java.util.*; import java.util.concurrent.*; -import java.util.function.Function; -import 
java.util.regex.Matcher; -import java.util.regex.Pattern; +import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -172,7 +169,7 @@ public class ThreadPool extends AbstractComponent { } } - public static final String THREADPOOL_GROUP = "threadpool."; + public static final Setting THREADPOOL_GROUP_SETTING = Setting.groupSetting("threadpool.", true, Setting.Scope.Cluster); private volatile Map executors; @@ -184,7 +181,7 @@ public class ThreadPool extends AbstractComponent { private final EstimatedTimeThread estimatedTimeThread; - private boolean settingsListenerIsSet = false; + private final AtomicBoolean settingsListenerIsSet = new AtomicBoolean(false); static final Executor DIRECT_EXECUTOR = command -> command.run(); @@ -197,7 +194,8 @@ public class ThreadPool extends AbstractComponent { assert settings.get("name") != null : "ThreadPool's settings should contain a name"; - Map groupSettings = getThreadPoolSettingsGroup(settings); + Map groupSettings = THREADPOOL_GROUP_SETTING.get(settings).getAsGroups(); + validate(groupSettings); int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5); @@ -252,18 +250,12 @@ public class ThreadPool extends AbstractComponent { this.estimatedTimeThread.start(); } - private Map getThreadPoolSettingsGroup(Settings settings) { - Map groupSettings = settings.getGroups(THREADPOOL_GROUP); - validate(groupSettings); - return groupSettings; - } - - public void setNodeSettingsService(NodeSettingsService nodeSettingsService) { - if(settingsListenerIsSet) { + public void setNodeSettingsService(ClusterSettingsService clusterSettingsService) { + if(settingsListenerIsSet.compareAndSet(false, true)) { + clusterSettingsService.addSettingsUpdateConsumer(THREADPOOL_GROUP_SETTING, this::updateSettings, (s) -> {validate(s.getAsGroups()); return 
true;}); + } else { throw new IllegalStateException("the node settings listener was set more then once"); } - nodeSettingsService.addListener(new ApplySettings()); - settingsListenerIsSet = true; } public long estimatedTimeInMillis() { @@ -526,8 +518,8 @@ public class ThreadPool extends AbstractComponent { throw new IllegalArgumentException("No type found [" + type + "], for [" + name + "]"); } - public void updateSettings(Settings settings) { - Map groupSettings = getThreadPoolSettingsGroup(settings); + private void updateSettings(Settings settings) { + Map groupSettings = settings.getAsGroups(); if (groupSettings.isEmpty()) { return; } @@ -583,7 +575,7 @@ public class ThreadPool extends AbstractComponent { ThreadPoolType correctThreadPoolType = THREAD_POOL_TYPES.get(key); // TODO: the type equality check can be removed after #3760/#6732 are addressed if (type != null && !correctThreadPoolType.getType().equals(type)) { - throw new IllegalArgumentException("setting " + THREADPOOL_GROUP + key + ".type to " + type + " is not permitted; must be " + correctThreadPoolType.getType()); + throw new IllegalArgumentException("setting " + THREADPOOL_GROUP_SETTING.getKey() + key + ".type to " + type + " is not permitted; must be " + correctThreadPoolType.getType()); } } } @@ -866,13 +858,6 @@ public class ThreadPool extends AbstractComponent { } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - updateSettings(settings); - } - } - /** * Returns true if the given service was terminated successfully. If the termination timed out, * the service is null this method will return false. 
@@ -911,38 +896,4 @@ public class ThreadPool extends AbstractComponent { } return false; } - - public static ThreadPoolTypeSettingsValidator THREAD_POOL_TYPE_SETTINGS_VALIDATOR = new ThreadPoolTypeSettingsValidator(); - private static class ThreadPoolTypeSettingsValidator implements Validator { - @Override - public String validate(String setting, String value, ClusterState clusterState) { - // TODO: the type equality validation can be removed after #3760/#6732 are addressed - Matcher matcher = Pattern.compile("threadpool\\.(.*)\\.type").matcher(setting); - if (!matcher.matches()) { - return null; - } else { - String threadPool = matcher.group(1); - ThreadPool.ThreadPoolType defaultThreadPoolType = ThreadPool.THREAD_POOL_TYPES.get(threadPool); - ThreadPool.ThreadPoolType threadPoolType; - try { - threadPoolType = ThreadPool.ThreadPoolType.fromType(value); - } catch (IllegalArgumentException e) { - return e.getMessage(); - } - if (defaultThreadPoolType.equals(threadPoolType)) { - return null; - } else { - return String.format( - Locale.ROOT, - "thread pool type for [%s] can only be updated to [%s] but was [%s]", - threadPool, - defaultThreadPoolType.getType(), - threadPoolType.getType() - ); - } - } - - } - } - } diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 14fc9029b00..f9cbb012b2a 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import 
org.elasticsearch.common.transport.TransportAddress; @@ -37,16 +38,14 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.Arrays; import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.Callable; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; @@ -88,14 +87,13 @@ public class TransportService extends AbstractLifecycleComponent TRACE_LOG_INCLUDE_SETTING = new Setting<>("transport.tracer.include", "_na_", "", Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster); + public static final Setting TRACE_LOG_EXCLUDE_SETTING = new Setting<>("transport.tracer.exclude", "_na_", "internal:discovery/zen/fd*," + TransportLivenessAction.NAME, Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster);; private final ESLogger tracerLog; volatile String[] tracerLogInclude; volatile String[] tracelLogExclude; - private final ApplySettings settingsListener = new ApplySettings(); /** if set will call requests sent to this id to shortcut and executed locally */ volatile DiscoveryNode localNode = null; @@ -109,8 +107,8 @@ public class TransportService extends AbstractLifecycleComponent Nodes upgrade complete"); diff --git a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 825e3e40894..6946e35861c 100644 --- 
a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -45,8 +45,8 @@ public class IndexingMasterFailoverIT extends ESIntegTestCase { .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // for hitting simulated network failures quickly .put(FaultDetection.SETTING_PING_RETRIES, "1") // for hitting simulated network failures quickly .put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out - .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2) + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) .build(); internalCluster().startMasterOnlyNodesAsync(3, sharedSettings).get(); diff --git a/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java index b857e4d59ee..e1265d577e9 100644 --- a/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java +++ b/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java @@ -62,7 +62,7 @@ public class ReplicaRecoveryBenchmark { BootstrapForTesting.ensureInitialized(); Settings settings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, "false") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), "false") .put(SETTING_NUMBER_OF_SHARDS, 1) .put(SETTING_NUMBER_OF_REPLICAS, 0) .put(TransportModule.TRANSPORT_TYPE_KEY, "local") diff --git a/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java 
b/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java index 553ef0c6ac6..14447fb715f 100644 --- a/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java +++ b/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java @@ -22,7 +22,6 @@ package org.elasticsearch.benchmark.transport; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; @@ -30,7 +29,6 @@ import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; import org.elasticsearch.transport.netty.NettyTransport; diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 137c6c5b2c2..b5e36c08f74 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -120,7 +120,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { public Settings nodeSettings(int ord) { return Settings.builder() .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) // disable merging so no segments will be upgraded - .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 30) // increase recovery speed for small files + 
.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), 30) // increase recovery speed for small files .build(); } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java index bccd4290d8e..b536c88ae4e 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java @@ -181,7 +181,7 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { logger.info("--> check settings"); ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - assertThat(clusterState.metaData().persistentSettings().get(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "version_attr"), equalTo(version)); + assertThat(clusterState.metaData().persistentSettings().get(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "version_attr"), equalTo(version)); logger.info("--> check templates"); IndexTemplateMetaData template = clusterState.getMetaData().templates().get("template_" + version.toLowerCase(Locale.ROOT)); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 5ed45620a03..8aa065548df 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.Store; @@ -126,7 +127,7 @@ public 
class ClusterInfoServiceIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() // manual collection or upon cluster forming. - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, "1s") + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s") .build(); } @@ -137,9 +138,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { } public void testClusterInfoServiceCollectsInformation() throws Exception { - internalCluster().startNodesAsync(2, - Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "200ms").build()) - .get(); + internalCluster().startNodesAsync(2).get(); assertAcked(prepareCreate("test").setSettings(settingsBuilder() .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0) .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE).build())); @@ -147,6 +146,8 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { InternalTestCluster internalTestCluster = internalCluster(); // Get the cluster info service on the master node final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(ClusterInfoService.class, internalTestCluster.getMasterName()); + infoService.setUpdateFrequency(TimeValue.timeValueMillis(200)); + infoService.onMaster(); ClusterInfo info = infoService.refresh(); assertNotNull("info should not be null", info); ImmutableOpenMap leastUsages = info.getNodeLeastAvailableDiskUsages(); @@ -188,7 +189,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { public void testClusterInfoServiceInformationClearOnError() throws InterruptedException, ExecutionException { internalCluster().startNodesAsync(2, // manually control publishing - Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "60m").build()) + 
Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build()) .get(); prepareCreate("test").setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).get(); ensureGreen("test"); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 4ca0fffbdfc..69fb6cb1a61 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -31,10 +31,12 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllo import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.settings.ClusterSettingsService; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexDynamicSettings; @@ -75,16 +77,16 @@ public class ClusterModuleTests extends ModuleTestCase { public void testRegisterClusterDynamicSettingDuplicate() { ClusterModule module = new ClusterModule(Settings.EMPTY); try { - module.registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY); + module.registerSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING); } catch (IllegalArgumentException e) { - assertEquals(e.getMessage(), "Cannot register setting [" + 
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE + "] twice"); + assertEquals(e.getMessage(), "Cannot register setting [" + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey() + "] twice"); } } public void testRegisterClusterDynamicSetting() { ClusterModule module = new ClusterModule(Settings.EMPTY); - module.registerClusterDynamicSetting("foo.bar", Validator.EMPTY); - assertInstanceBindingWithAnnotation(module, DynamicSettings.class, dynamicSettings -> dynamicSettings.hasDynamicSetting("foo.bar"), ClusterDynamicSettings.class); + module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster)); + assertInstanceBinding(module, ClusterSettingsService.class, service -> service.getClusterSettings().hasDynamicSetting("foo.bar")); } public void testRegisterIndexDynamicSettingDuplicate() { diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index 9e842a38722..5199d3fc2ef 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -940,7 +940,7 @@ public class ClusterServiceIT extends ESIntegTestCase { public void testLongClusterStateUpdateLogging() throws Exception { Settings settings = settingsBuilder() .put("discovery.type", "local") - .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10s") + .put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10s") .build(); internalCluster().startNode(settings); ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class); @@ -976,7 +976,7 @@ public class ClusterServiceIT extends ESIntegTestCase { processedFirstTask.await(1, TimeUnit.SECONDS); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() - 
.put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10ms"))); + .put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10ms"))); clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 648356be173..2f1e5d33f7e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -280,7 +280,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { assertNoMasterBlockOnAllNodes(); logger.info("--> bringing another node up"); - internalCluster().startNode(settingsBuilder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2).build()); + internalCluster().startNode(settingsBuilder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2).build()); clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); } @@ -317,7 +317,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { // set an initial value which is at least quorum to avoid split brains during initial startup int initialMinMasterNodes = randomIntBetween(nodeCount / 2 + 1, nodeCount); - settings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, initialMinMasterNodes); + settings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), initialMinMasterNodes); logger.info("--> starting [{}] nodes. 
min_master_nodes set to [{}]", nodeCount, initialMinMasterNodes); @@ -328,19 +328,21 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { int updateCount = randomIntBetween(1, nodeCount); - logger.info("--> updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount); + logger.info("--> updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); assertAcked(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount))); + .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount))); logger.info("--> verifying no node left and master is up"); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut()); updateCount = nodeCount + randomIntBetween(1, 2000); - logger.info("--> trying to updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount); - assertThat(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount)) - .get().getPersistentSettings().getAsMap().keySet(), - empty()); + logger.info("--> trying to updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); + try { + client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount)); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [" +updateCount+ "]"); + } logger.info("--> verifying no node left and master is up"); 
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut()); @@ -351,8 +353,8 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { .put("discovery.type", "zen") .put(FaultDetection.SETTING_PING_TIMEOUT, "1h") // disable it .put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms") - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2) - .put(DiscoverySettings.COMMIT_TIMEOUT, "100ms") // speed things up + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "100ms") // speed things up .build(); internalCluster().startNodesAsync(3, settings).get(); ensureGreen(); // ensure cluster state is recovered before we disrupt things diff --git a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java index e0f8b2cb840..8e5479d6f84 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -67,7 +67,7 @@ public class NoMasterNodeIT extends ESIntegTestCase { .put("discovery.zen.minimum_master_nodes", 2) .put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms") .put("discovery.initial_state_timeout", "500ms") - .put(DiscoverySettings.NO_MASTER_BLOCK, "all") + .put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all") .build(); TimeValue timeout = TimeValue.timeValueMillis(200); @@ -219,7 +219,7 @@ public class NoMasterNodeIT extends ESIntegTestCase { .put("discovery.zen.minimum_master_nodes", 2) .put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms") .put("discovery.initial_state_timeout", "500ms") - .put(DiscoverySettings.NO_MASTER_BLOCK, "write") + .put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "write") .build(); internalCluster().startNode(settings); diff --git a/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java 
b/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java index 81de8b1a43c..c5e48a97dfd 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java @@ -50,8 +50,8 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase { .put(super.nodeSettings(nodeOrdinal)) //make sure that enough concurrent reroutes can happen at the same time //we have a minimum of 2 nodes, and a maximum of 10 shards, thus 5 should be enough - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 5) - .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 10) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 5) + .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 10) .build(); } @@ -69,7 +69,7 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase { private void removePublishTimeout() { //to test that the acknowledgement mechanism is working we better disable the wait for publish //otherwise the operation is most likely acknowledged even if it doesn't support ack - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0"))); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0"))); } public void testClusterUpdateSettingsAcknowledgement() { diff --git a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index 47517a753af..7d3825a14b8 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java @@ -67,8 +67,8 @@ public class AckIT extends ESIntegTestCase { //to test that the acknowledgement mechanism is working we better disable the wait for publish //otherwise the operation is most likely acknowledged even if it doesn't support ack return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoverySettings.PUBLISH_TIMEOUT, 0).build(); - } + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), 0).build(); +} public void testUpdateSettingsAcknowledgement() { createIndex("test"); diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index f9151628b8a..726590104f1 100644 --- a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java @@ -106,9 +106,9 @@ public class AwarenessAllocationIT extends ESIntegTestCase { public void testAwarenessZones() throws Exception { Settings commonSettings = Settings.settingsBuilder() - .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP + "zone.values", "a,b") - .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, "zone") - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 3) + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 3) .put(ZenDiscovery.SETTING_JOIN_TIMEOUT, "10s") .build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 1605e70637e..b85c17097f2 100644 --- 
a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -56,7 +56,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; @@ -71,15 +71,15 @@ public class ClusterRerouteIT extends ESIntegTestCase { public void testRerouteWithCommands_disableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none") - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .build(); rerouteWithCommands(commonSettings); } public void testRerouteWithCommands_enableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build(); rerouteWithCommands(commonSettings); } @@ -147,15 +147,15 @@ public class ClusterRerouteIT extends ESIntegTestCase { 
public void testRerouteWithAllocateLocalGateway_disableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none") - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .build(); rerouteWithAllocateLocalGateway(commonSettings); } public void testRerouteWithAllocateLocalGateway_enableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build(); rerouteWithAllocateLocalGateway(commonSettings); } @@ -279,7 +279,7 @@ public class ClusterRerouteIT extends ESIntegTestCase { logger.info("--> disable allocation"); Settings newSettings = settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build(); client().admin().cluster().prepareUpdateSettings().setTransientSettings(newSettings).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index d10912e69db..336846f2da9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -51,7 +51,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { public void testAddNodesAndIndices() { Settings.Builder settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, 
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); AllocationService service = createAllocationService(settings.build()); ClusterState clusterState = initCluster(service, 1, 3, 3, 1); @@ -94,7 +94,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { public void testMinimalRelocations() { Settings.Builder settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) .put("cluster.routing.allocation.node_concurrent_recoveries", 2); AllocationService service = createAllocationService(settings.build()); @@ -162,7 +162,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { public void testMinimalRelocationsNoLimit() { Settings.Builder settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) .put("cluster.routing.allocation.node_concurrent_recoveries", 100) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100); AllocationService service = createAllocationService(settings.build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java 
index 6ac2b7df9ca..1cf5ba0083d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -98,8 +98,8 @@ public class AllocationCommandsTests extends ESAllocationTestCase { public void testAllocateCommand() { AllocationService allocation = createAllocationService(settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none") - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .build()); logger.info("--> building initial routing table"); @@ -186,8 +186,8 @@ public class AllocationCommandsTests extends ESAllocationTestCase { public void testCancelCommand() { AllocationService allocation = createAllocationService(settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none") - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .build()); logger.info("--> building initial routing table"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java index d7a049d1b92..8d510e7f0c5 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java @@ -39,8 +39,8 @@ public class AllocationPriorityTests extends ESAllocationTestCase { public void testPrioritizedIndicesAllocatedFirst() { AllocationService 
allocation = createAllocationService(settingsBuilder(). put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, 1) - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 1) - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 1).build()); + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), 1) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 1).build()); final String highPriorityName; final String lowPriorityName; final int priorityFirst; diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java index 7be6037cf79..e9d0f75b1c1 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -55,7 +55,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testMoveShardOnceNewNodeWithAttributeAdded1() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -123,7 +123,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testMoveShardOnceNewNodeWithAttributeAdded2() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - 
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -193,7 +193,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .put("cluster.routing.allocation.awareness.attributes", "rack_id") .put("cluster.routing.allocation.balance.index", 0.0f) @@ -293,7 +293,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -387,7 +387,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testMoveShardOnceNewNodeWithAttributeAdded5() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, 
"always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -465,7 +465,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testMoveShardOnceNewNodeWithAttributeAdded6() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -545,7 +545,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testFullAwareness1() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -612,7 +612,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testFullAwareness2() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ 
-681,7 +681,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2") .put("cluster.routing.allocation.awareness.attributes", "rack_id") @@ -767,7 +767,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { .put("cluster.routing.allocation.awareness.attributes", "zone") .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); @@ -828,7 +828,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testUnassignedShardsWithUnbalancedZones() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "zone") .build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index 1092b2ede19..bc4ef8235d4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -37,10 +37,11 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllo import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESAllocationTestCase; import org.elasticsearch.test.gateway.NoopGatewayAllocator; import org.hamcrest.Matchers; @@ -65,10 +66,10 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { final float balanceTreshold = 1.0f; Settings.Builder settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); - settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance); - settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance); - settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceTreshold); + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); + 
settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceTreshold); AllocationService strategy = createAllocationService(settings.build()); @@ -90,10 +91,10 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { final float balanceTreshold = 1.0f; Settings.Builder settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); - settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance); - settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance); - settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceTreshold); + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); + settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceTreshold); AllocationService strategy = createAllocationService(settings.build()); @@ -279,36 +280,30 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { public void testPersistedSettings() { Settings.Builder settings = settingsBuilder(); - settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.2); - settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.3); - settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 2.0); - final NodeSettingsService.Listener[] listeners = new NodeSettingsService.Listener[1]; - NodeSettingsService service = new 
NodeSettingsService(settingsBuilder().build()) { - - @Override - public void addListener(Listener listener) { - assertNull("addListener was called twice while only one time was expected", listeners[0]); - listeners[0] = listener; - } - - }; + settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.2); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 0.3); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 2.0); + ClusterSettingsService service = new ClusterSettingsService(settingsBuilder().build(), new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings.build(), service); assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f)); assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f)); assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f)); settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); - listeners[0].onRefreshSettings(settings.build()); + settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.2); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 0.3); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 2.0); + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); + service.applySettings(settings.build()); assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f)); assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f)); assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f)); settings = settingsBuilder(); - settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.5); - 
settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.1); - settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 3.0); - listeners[0].onRefreshSettings(settings.build()); + settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.5); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 0.1); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 3.0); + service.applySettings(settings.build()); assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.5f)); assertThat(allocator.getShardBalance(), Matchers.equalTo(0.1f)); assertThat(allocator.getThreshold(), Matchers.equalTo(3.0f)); @@ -317,7 +312,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { public void testNoRebalanceOnPrimaryOverload() { Settings.Builder settings = settingsBuilder(); AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(), - new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), getRandom()), new ShardsAllocators(settings.build(), + new ClusterSettingsService(Settings.Builder.EMPTY_SETTINGS, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), getRandom()), new ShardsAllocators(settings.build(), NoopGatewayAllocator.INSTANCE, new ShardsAllocator() { @Override diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java index 8dad41db2f8..15a6ea0a5f4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java @@ -46,7 +46,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { private final ESLogger logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class); public void testAlways() { - 
AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build()); MetaData metaData = MetaData.builder() @@ -132,7 +132,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { public void testClusterPrimariesActive1() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build()); MetaData metaData = MetaData.builder() @@ -236,7 +236,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { } public void testClusterPrimariesActive2() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build()); MetaData metaData = MetaData.builder() @@ -320,7 +320,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { } public void testClusterAllActive1() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = 
createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build()); MetaData metaData = MetaData.builder() @@ -443,7 +443,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { } public void testClusterAllActive2() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build()); MetaData metaData = MetaData.builder() @@ -527,7 +527,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { } public void testClusterAllActive3() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build()); MetaData metaData = MetaData.builder() @@ -737,7 +737,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { public void testRebalanceWhileShardFetching() { final AtomicBoolean hasFetches = new AtomicBoolean(true); - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), 
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build(), new NoopGatewayAllocator() { @Override public boolean allocateUnassigned(RoutingAllocation allocation) { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java index e16e7cc2cec..d807dc1b5ca 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java @@ -46,7 +46,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { public void testSimpleDeadNodeOnStartedPrimaryShard() { AllocationService allocation = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("--> building initial routing table"); @@ -97,7 +97,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { public void testDeadNodeWhileRelocatingOnToNode() { AllocationService allocation = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("--> building initial routing table"); @@ -171,7 +171,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { public void testDeadNodeWhileRelocatingOnFromNode() { AllocationService allocation = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - 
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("--> building initial routing table"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 3b242d8676f..affab78521c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -41,7 +41,7 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase { private final ESLogger logger = Loggers.getLogger(FailedNodeRoutingTests.class); public void testSimpleFailedNodeTest() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build()); MetaData metaData = MetaData.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 9bfaf7e9997..8dffacaa379 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -57,7 +57,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void testFailedShardPrimaryRelocatingToAndFrom() { AllocationService allocation = 
createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("--> building initial routing table"); @@ -145,7 +145,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void testFailPrimaryStartedCheckReplicaElected() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("Building initial routing table"); @@ -226,7 +226,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void testFirstAllocationFailureSingleNode() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("Building initial routing table"); @@ -282,7 +282,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void testSingleShardMultipleAllocationFailures() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("Building initial routing table"); 
@@ -338,7 +338,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void testFirstAllocationFailureTwoNodes() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("Building initial routing table"); @@ -398,7 +398,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void testRebalanceFailure() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("Building initial routing table"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java index aa6fdef828a..d5f8134d95f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java @@ -48,7 +48,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") 
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); @@ -178,7 +178,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); @@ -340,7 +340,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 62501cbf9fb..c00cd843b1b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -58,7 +58,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { public void testDoNotAllocateFromPrimary() { 
AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); @@ -172,7 +172,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { public void testRandom() { AllocationService service = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); @@ -221,7 +221,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { public void testRollingRestart() { AllocationService service = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index fbc742573e9..18725a0de78 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -57,7 +57,7 @@ public class 
RebalanceAfterActiveTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), new ClusterInfoService() { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java index eca2a227f8f..eec1b48be97 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java @@ -47,7 +47,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); @@ -119,7 +119,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + 
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); @@ -211,7 +211,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 1) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java index f096ab0b13d..e1586c433a5 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java @@ -41,7 +41,7 @@ public class ShardVersioningTests extends ESAllocationTestCase { private final ESLogger logger = Loggers.getLogger(ShardVersioningTests.class); public void testSimple() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build()); MetaData metaData = MetaData.builder() diff --git 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 11d41a6a336..c0f0c0c2252 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -90,7 +90,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { public void testClusterLevelShardsLimitAllocate() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, 1) + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 1) .build()); logger.info("Building initial routing table"); @@ -126,7 +126,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { // Bump the cluster total shards to 2 strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, 2) + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 2) .build()); logger.info("Do another reroute, make sure shards are now allocated"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index ed44b84a886..29ef451324d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -211,7 +211,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { public void 
testMultiIndexEvenDistribution() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); @@ -323,7 +323,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { public void testMultiIndexUnevenNodes() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java index 671cce007c9..aec81a6e063 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java @@ -50,7 +50,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") 
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .put("cluster.routing.allocation.balance.index", 0.0f) .put("cluster.routing.allocation.balance.replica", 1.0f) diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index a739f30856a..5377d09d4b5 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -60,9 +60,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testDiskThreshold() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8).build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "node1", "/dev/null", 100, 10)); // 90% used @@ -96,7 +96,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { }; AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") 
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -170,9 +170,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 70 instead of 80 // node2 now should not have new shards allocated to it, but shards can remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.7).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.7).build(); deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -181,7 +181,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -201,9 +201,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 60 instead of 70 // node2 now should not have new shards allocated to it, and shards cannot remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.5) - 
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.6).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.5) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.6).build(); deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -212,7 +212,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -254,9 +254,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testDiskThresholdWithAbsoluteSizes() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "30b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "9b").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "30b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "9b").build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 10)); // 90% used @@ -292,7 +292,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase 
{ AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -349,7 +349,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { }; strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -405,9 +405,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 70 instead of 80 // node2 now should not have new shards allocated to it, but shards can remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "40b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "30b").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "40b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "30b").build(); deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -416,7 +416,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new 
AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -436,9 +436,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 60 instead of 70 // node2 now should not have new shards allocated to it, and shards cannot remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "50b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "40b").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "50b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "40b").build(); deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -447,7 +447,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -522,9 +522,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testDiskThresholdWithShardSizes() { Settings 
diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "71%").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "71%").build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 31)); // 69% used @@ -556,7 +556,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -589,9 +589,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testUnknownDiskUsage() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.85).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + 
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.85).build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node2", new DiskUsage("node2", "node2", "/dev/null", 100, 50)); // 50% used @@ -624,7 +624,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -688,10 +688,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testShardRelocationsTakenIntoAccount() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8).build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40)); // 60% used @@ -727,7 +727,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { 
AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -794,10 +794,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testCanRemainWithShardRelocatingAway() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%").build(); // We have an index with 2 primary shards each taking 40 bytes. 
Each node has 100 bytes available ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); @@ -889,7 +889,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { ))); AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); // Ensure that the reroute call doesn't alter the routing table, since the first primary is relocating away @@ -906,10 +906,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testForSingleDataNode() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%").build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 100)); // 0% used @@ -989,7 +989,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new 
AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index a386883ad1b..279687a004f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -28,12 +28,13 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -45,7 +46,7 @@ import static org.hamcrest.CoreMatchers.equalTo; */ public class DiskThresholdDeciderUnitTests extends ESTestCase { public void testDynamicSettings() { - NodeSettingsService nss = new 
NodeSettingsService(Settings.EMPTY); + ClusterSettingsService nss = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); @@ -59,18 +60,15 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { assertTrue(decider.isEnabled()); assertTrue(decider.isIncludeRelocations()); - DiskThresholdDecider.ApplySettings applySettings = decider.newApplySettings(); - Settings newSettings = Settings.builder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, false) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, false) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "500mb") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, "30s") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), false) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "500mb") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "30s") .build(); - applySettings.onRefreshSettings(newSettings); - + nss.applySettings(newSettings); assertThat("high threshold bytes should be unset", decider.getFreeBytesThresholdHigh(), equalTo(ByteSizeValue.parseBytesSizeValue("0b", "test"))); assertThat("high threshold percentage should be changed", @@ -86,7 +84,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { } public void testCanAllocateUsesMaxAvailableSpace() { - NodeSettingsService nss = new 
NodeSettingsService(Settings.EMPTY); + ClusterSettingsService nss = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); @@ -127,7 +125,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { } public void testCanRemainUsesLeastAvailableSpace() { - NodeSettingsService nss = new NodeSettingsService(Settings.EMPTY); + ClusterSettingsService nss = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); ImmutableOpenMap.Builder shardRoutingMap = ImmutableOpenMap.builder(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java index 940634a4657..be64aafc61e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java @@ -37,7 +37,7 @@ public class EnableAllocationDeciderIT extends ESIntegTestCase { public void testEnableRebalance() throws InterruptedException { final String firstNode = internalCluster().startNode(); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)).get(); // we 
test with 2 shards since otherwise it's pretty fragile if there are difference in the num or shards such that // all shards are relocated to the second node which is not what we want here. It's solely a test for the settings to take effect final int numShards = 2; @@ -64,7 +64,7 @@ public class EnableAllocationDeciderIT extends ESIntegTestCase { assertThat("index: [test] expected to be rebalanced on both nodes", test.size(), equalTo(2)); // flip the cluster wide setting such that we can also balance for index test_1 eventually we should have one shard of each index on each node - client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? 
EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get(); logger.info("--> balance index [test_1]"); client().admin().cluster().prepareReroute().get(); ensureGreen("test_1"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java index 0049a120777..b0a49a93936 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -32,10 +31,11 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Rebalance; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESAllocationTestCase; import java.util.EnumSet; @@ -44,8 +44,8 @@ import java.util.List; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static 
org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.hamcrest.Matchers.equalTo; @@ -58,7 +58,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { public void testClusterEnableNone() { AllocationService strategy = createAllocationService(settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build()); logger.info("Building initial routing table"); @@ -86,7 +86,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { public void testClusterEnableOnlyPrimaries() { AllocationService strategy = createAllocationService(settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.PRIMARIES.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.PRIMARIES.name()) .build()); logger.info("Building initial routing table"); @@ -159,11 +159,11 @@ public class EnableAllocationTests extends ESAllocationTestCase { final boolean useClusterSetting = randomBoolean(); final Rebalance allowedOnes = RandomPicks.randomFrom(getRandom(), EnumSet.of(Rebalance.PRIMARIES, Rebalance.REPLICAS, Rebalance.ALL)); 
Settings build = settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings - .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 3) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings + .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3) .build(); - NodeSettingsService nodeSettingsService = new NodeSettingsService(build); - AllocationService strategy = createAllocationService(build, nodeSettingsService, getRandom()); + ClusterSettingsService clusterSettingsService = new ClusterSettingsService(build, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + AllocationService strategy = createAllocationService(build, clusterSettingsService, getRandom()); Settings indexSettings = useClusterSetting ? 
Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build(); logger.info("Building initial routing table"); @@ -213,7 +213,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { if (useClusterSetting) { prevState = clusterState; clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).transientSettings(settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, allowedOnes) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes) .build())).build(); } else { prevState = clusterState; @@ -224,7 +224,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { .build(); } - nodeSettingsService.clusterChanged(new ClusterChangedEvent("foo", clusterState, prevState)); + clusterSettingsService.applySettings(clusterState.metaData().settings()); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat("expected 6 shards to be started 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6)); @@ -261,11 +261,11 @@ public class EnableAllocationTests extends ESAllocationTestCase { public void testEnableClusterBalanceNoReplicas() { final boolean useClusterSetting = randomBoolean(); Settings build = settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings - .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 3) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), useClusterSetting ? 
Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings + .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3) .build(); - NodeSettingsService nodeSettingsService = new NodeSettingsService(build); - AllocationService strategy = createAllocationService(build, nodeSettingsService, getRandom()); + ClusterSettingsService clusterSettingsService = new ClusterSettingsService(build, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + AllocationService strategy = createAllocationService(build, clusterSettingsService, getRandom()); Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build(); logger.info("Building initial routing table"); @@ -307,7 +307,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { if (useClusterSetting) { prevState = clusterState; clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).transientSettings(settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL) .build())).build(); } else { prevState = clusterState; @@ -315,7 +315,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).removeAllIndices() .put(IndexMetaData.builder(meta).settings(settingsBuilder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, randomBoolean() ? 
Rebalance.PRIMARIES : Rebalance.ALL).build()))).build(); } - nodeSettingsService.clusterChanged(new ClusterChangedEvent("foo", clusterState, prevState)); + clusterSettingsService.applySettings(clusterState.metaData().settings()); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat("expected 4 primaries to be started and 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index 126799f5937..a17017f6303 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.MockInternalClusterInfoService; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -46,22 +47,12 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class MockDiskUsagesIT extends ESIntegTestCase { - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - // Update more frequently - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "1s") - .build(); - } - @Override protected Collection> nodePlugins() { // Use the mock internal cluster info service, which has fake-able disk 
usages return pluginList(MockInternalClusterInfoService.TestPlugin.class); } - //@TestLogging("org.elasticsearch.cluster:TRACE,org.elasticsearch.cluster.routing.allocation.decider:TRACE") public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { List nodes = internalCluster().startNodesAsync(3).get(); @@ -77,15 +68,16 @@ public class MockDiskUsagesIT extends ESIntegTestCase { // Start with all nodes at 50% usage final MockInternalClusterInfoService cis = (MockInternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName()); + cis.setUpdateFrequency(TimeValue.timeValueMillis(200)); + cis.onMaster(); cis.setN1Usage(nodes.get(0), new DiskUsage(nodes.get(0), "n1", "/dev/null", 100, 50)); cis.setN2Usage(nodes.get(1), new DiskUsage(nodes.get(1), "n2", "/dev/null", 100, 50)); cis.setN3Usage(nodes.get(2), new DiskUsage(nodes.get(2), "n3", "/dev/null", 100, 50)); client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, randomFrom("20b", "80%")) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, randomFrom("10b", "90%")) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, "1ms")).get(); - + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), randomFrom("20b", "80%")) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), randomFrom("10b", "90%")) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms")).get(); // Create an index with 10 shards so we can check allocation for it prepareCreate("test").setSettings(settingsBuilder() .put("number_of_shards", 10) diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index 
65d5b0b9fcd..ce76893a831 100644 --- a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.settings; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.cluster.ClusterName; @@ -32,7 +33,6 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.hamcrest.Matchers; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; @@ -48,22 +48,116 @@ public class ClusterSettingsIT extends ESIntegTestCase { public void testClusterNonExistingSettingsUpdate() { String key1 = "no_idea_what_you_are_talking_about"; int value1 = 10; + try { + client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(key1, value1).build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "transient setting [no_idea_what_you_are_talking_about], not dynamically updateable"); + } + } + + public void testResetClusterSetting() { + DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class); + + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); ClusterUpdateSettingsResponse response = client().admin().cluster() .prepareUpdateSettings() - 
.setTransientSettings(Settings.builder().put(key1, value1).build()) + .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) .get(); assertAcked(response); - assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable()); + assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + + response = client().admin().cluster() + .prepareUpdateSettings() + .addTransientResetKeys(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()) + .get(); + + assertAcked(response); + assertNull(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder() + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build()) + .get(); + + assertAcked(response); + assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertFalse(discoverySettings.getPublishDiff()); + response = client().admin().cluster() + .prepareUpdateSettings() + .addTransientResetKeys(randomBoolean() ? 
"discovery.zen.*" : "*") + .get(); + + assertNull(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertNull(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + // now persistent + response = client().admin().cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) + .get(); + + assertAcked(response); + assertThat(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + + response = client().admin().cluster() + .prepareUpdateSettings() + .addPersistentResetKeys(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()) + .get(); + + assertAcked(response); + assertNull(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + + response = client().admin().cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder() + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build()) + .get(); + + assertAcked(response); + 
assertThat(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertFalse(discoverySettings.getPublishDiff()); + response = client().admin().cluster() + .prepareUpdateSettings() + .addPersistentResetKeys(randomBoolean() ? "discovery.zen.*" : "*") + .get(); + + assertNull(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertNull(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); } public void testClusterSettingsUpdateResponse() { - String key1 = IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC; + String key1 = IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(); int value1 = 10; - String key2 = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE; - boolean value2 = false; + String key2 = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(); + String value2 = EnableAllocationDecider.Allocation.NONE.name(); Settings transientSettings1 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).build(); Settings persistentSettings1 = Settings.builder().put(key2, value2).build(); @@ -118,39 +212,45 @@ public class ClusterSettingsIT extends ESIntegTestCase { DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class); - assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.DEFAULT_PUBLISH_TIMEOUT)); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); ClusterUpdateSettingsResponse response = 
client().admin().cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "1s").build()) + .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) .get(); assertAcked(response); - assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT), equalTo("1s")); + assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); - response = client().admin().cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "whatever").build()) - .get(); + try { + client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "whatever").build()) + .get(); + fail("bogus value"); + } catch (ElasticsearchParseException ex) { + assertEquals(ex.getMessage(), "Failed to parse setting [discovery.zen.commit_timeout] with value [whatever] as a time value: unit is missing or unrecognized"); + } - assertAcked(response); - assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable()); assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); - response = client().admin().cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, -1).build()) - .get(); + try { + client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), -1).build()) + .get(); + fail("bogus value"); + } catch (ElasticsearchParseException ex) { + assertEquals(ex.getMessage(), "Failed to parse value [-1] for setting [discovery.zen.publish_timeout] must be >= 0s"); + } - assertAcked(response); - 
assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable()); assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); } public void testClusterUpdateSettingsWithBlocks() { String key1 = "cluster.routing.allocation.enable"; - Settings transientSettings = Settings.builder().put(key1, false).build(); + Settings transientSettings = Settings.builder().put(key1, EnableAllocationDecider.Allocation.NONE.name()).build(); String key2 = "cluster.routing.allocation.node_concurrent_recoveries"; Settings persistentSettings = Settings.builder().put(key2, "5").build(); @@ -165,7 +265,7 @@ public class ClusterSettingsIT extends ESIntegTestCase { assertBlocked(request, MetaData.CLUSTER_READ_ONLY_BLOCK); // But it's possible to update the settings to update the "cluster.blocks.read_only" setting - Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, false).build(); + Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), false).build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get()); } finally { diff --git a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index c5a695d16e5..6094d49234c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -225,7 +225,7 @@ public class RoutingIteratorTests extends ESAllocationTestCase { public void testAttributePreferenceRouting() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), 
"always") .put("cluster.routing.allocation.awareness.attributes", "rack_id,zone") .build()); @@ -280,7 +280,7 @@ public class RoutingIteratorTests extends ESAllocationTestCase { public void testNodeSelectorRouting(){ AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); MetaData metaData = MetaData.builder() diff --git a/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java index fa4ce357a52..c484df7d99d 100644 --- a/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java +++ b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java @@ -19,12 +19,13 @@ package org.elasticsearch.common.breaker; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import java.util.concurrent.atomic.AtomicBoolean; @@ -87,7 +88,7 @@ public class MemoryCircuitBreakerTests extends ESTestCase { final AtomicReference lastException = new AtomicReference<>(null); final AtomicReference breakerRef = new AtomicReference<>(null); - final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)) { + final 
CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { @Override public CircuitBreaker getBreaker(String name) { @@ -147,7 +148,7 @@ public class MemoryCircuitBreakerTests extends ESTestCase { final AtomicInteger parentTripped = new AtomicInteger(0); final AtomicReference breakerRef = new AtomicReference<>(null); - final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)) { + final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { @Override public CircuitBreaker getBreaker(String name) { diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java new file mode 100644 index 00000000000..08ec33d0b43 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -0,0 +1,282 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common.settings; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +public class SettingTests extends ESTestCase { + + + public void testGet() { + Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster); + assertFalse(booleanSetting.get(Settings.EMPTY)); + assertFalse(booleanSetting.get(Settings.builder().put("foo.bar", false).build())); + assertTrue(booleanSetting.get(Settings.builder().put("foo.bar", true).build())); + } + + public void testByteSize() { + Setting byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), true, Setting.Scope.Cluster); + assertFalse(byteSizeValueSetting.isGroupSetting()); + ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); + assertEquals(byteSizeValue.bytes(), 1024); + AtomicReference value = new AtomicReference<>(null); + ClusterSettingsService.SettingUpdater settingUpdater = byteSizeValueSetting.newUpdater(value::set, logger, Settings.EMPTY); + try { + settingUpdater.prepareApply(Settings.builder().put("a.byte.size", 12).build()); + fail("no unit"); + } catch (ElasticsearchParseException ex) { + assertEquals("failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized", ex.getMessage()); + } + + assertTrue(settingUpdater.prepareApply(Settings.builder().put("a.byte.size", "12b").build())); + settingUpdater.apply(); + assertEquals(new ByteSizeValue(12), value.get()); + } + + public void testSimpleUpdate() { + Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster); + AtomicReference atomicBoolean = new AtomicReference<>(null); + ClusterSettingsService.SettingUpdater settingUpdater = 
booleanSetting.newUpdater(atomicBoolean::set, logger, Settings.EMPTY); + Settings build = Settings.builder().put("foo.bar", false).build(); + settingUpdater.prepareApply(build); + assertNull(atomicBoolean.get()); + settingUpdater.rollback(); + assertNull(atomicBoolean.get()); + build = Settings.builder().put("foo.bar", true).build(); + settingUpdater.prepareApply(build); + assertNull(atomicBoolean.get()); + settingUpdater.apply(); + assertTrue(atomicBoolean.get()); + + // try update bogus value + build = Settings.builder().put("foo.bar", "I am not a boolean").build(); + try { + settingUpdater.prepareApply(build); + fail("not a boolean"); + } catch (IllegalArgumentException ex) { + assertEquals("Failed to parse value [I am not a boolean] for setting [foo.bar]", ex.getMessage()); + } + } + + public void testUpdateNotDynamic() { + Setting booleanSetting = Setting.boolSetting("foo.bar", false, false, Setting.Scope.Cluster); + assertFalse(booleanSetting.isGroupSetting()); + AtomicReference atomicBoolean = new AtomicReference<>(null); + try { + booleanSetting.newUpdater(atomicBoolean::set, logger, Settings.EMPTY); + fail("not dynamic"); + } catch (IllegalStateException ex) { + assertEquals("setting [foo.bar] is not dynamic", ex.getMessage()); + } + } + + public void testUpdaterIsIsolated() { + Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster); + AtomicReference ab1 = new AtomicReference<>(null); + AtomicReference ab2 = new AtomicReference<>(null); + ClusterSettingsService.SettingUpdater settingUpdater = booleanSetting.newUpdater(ab1::set, logger, Settings.EMPTY); + settingUpdater.prepareApply(Settings.builder().put("foo.bar", true).build()); + assertNull(ab1.get()); + assertNull(ab2.get()); + settingUpdater.apply(); + assertTrue(ab1.get()); + assertNull(ab2.get()); + } + + public void testDefault() { + TimeValue defautlValue = TimeValue.timeValueMillis(randomIntBetween(0, 1000000)); + Setting setting = 
Setting.positiveTimeSetting("my.time.value", defautlValue, randomBoolean(), Setting.Scope.Cluster); + assertFalse(setting.isGroupSetting()); + String aDefault = setting.getDefault(Settings.EMPTY); + assertEquals(defautlValue.millis() + "ms", aDefault); + assertEquals(defautlValue.millis(), setting.get(Settings.EMPTY).millis()); + + Setting secondaryDefault = new Setting<>("foo.bar", "_na_", (s) -> s.get("old.foo.bar", "some_default"), (s) -> s, randomBoolean(), Setting.Scope.Cluster); + assertEquals("some_default", secondaryDefault.get(Settings.EMPTY)); + assertEquals("42", secondaryDefault.get(Settings.builder().put("old.foo.bar", 42).build())); + } + + public void testComplexType() { + AtomicReference ref = new AtomicReference<>(null); + Setting setting = new Setting<>("foo.bar", "", (s) -> "", (s) -> new ComplexType(s), true, Setting.Scope.Cluster); + assertFalse(setting.isGroupSetting()); + ref.set(setting.get(Settings.EMPTY)); + ComplexType type = ref.get(); + ClusterSettingsService.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY); + assertFalse(settingUpdater.prepareApply(Settings.EMPTY)); + settingUpdater.apply(); + assertSame("no update - type has not changed", type, ref.get()); + + // change from default + assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.bar", "2").build())); + settingUpdater.apply(); + assertNotSame("update - type has changed", type, ref.get()); + assertEquals("2", ref.get().foo); + + + // change back to default... 
+ assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.bar.baz", "2").build())); + settingUpdater.apply(); + assertNotSame("update - type has changed", type, ref.get()); + assertEquals("", ref.get().foo); + } + + public void testRollback() { + Setting integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.Cluster); + assertFalse(integerSetting.isGroupSetting()); + AtomicReference ref = new AtomicReference<>(null); + ClusterSettingsService.SettingUpdater settingUpdater = integerSetting.newUpdater(ref::set, logger, Settings.EMPTY); + assertNull(ref.get()); + assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.int.bar", "2").build())); + settingUpdater.rollback(); + settingUpdater.apply(); + assertNull(ref.get()); + assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.int.bar", "2").build())); + settingUpdater.apply(); + assertEquals(2, ref.get().intValue()); + } + + public void testType() { + Setting integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.Cluster); + assertEquals(integerSetting.getScope(), Setting.Scope.Cluster); + integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.Index); + assertEquals(integerSetting.getScope(), Setting.Scope.Index); + } + + public void testGroups() { + AtomicReference ref = new AtomicReference<>(null); + Setting setting = Setting.groupSetting("foo.bar.", true, Setting.Scope.Cluster); + assertTrue(setting.isGroupSetting()); + ClusterSettingsService.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY); + + assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").put("foo.bar.3.value", "3").build())); + settingUpdater.apply(); + assertNotNull(ref.get()); + Settings settings = ref.get(); + Map asMap = settings.getAsGroups(); + assertEquals(3, asMap.size()); + assertEquals(asMap.get("1").get("value"), "1"); + 
assertEquals(asMap.get("2").get("value"), "2"); + assertEquals(asMap.get("3").get("value"), "3"); + + Settings current = ref.get(); + assertFalse(settingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").put("foo.bar.3.value", "3").build())); + settingUpdater.apply(); + assertSame(current, ref.get()); + + // now update and check that we got it + assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build())); + settingUpdater.apply(); + assertNotSame(current, ref.get()); + + asMap = ref.get().getAsGroups(); + assertEquals(2, asMap.size()); + assertEquals(asMap.get("1").get("value"), "1"); + assertEquals(asMap.get("2").get("value"), "2"); + + // now update and check that we got it + assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "4").build())); + settingUpdater.apply(); + assertNotSame(current, ref.get()); + + asMap = ref.get().getAsGroups(); + assertEquals(2, asMap.size()); + assertEquals(asMap.get("1").get("value"), "1"); + assertEquals(asMap.get("2").get("value"), "4"); + + assertTrue(setting.match("foo.bar.baz")); + assertFalse(setting.match("foo.baz.bar")); + + ClusterSettingsService.SettingUpdater predicateSettingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY, (s) -> false); + try { + predicateSettingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build()); + fail("not accepted"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "illegal value can't update [foo.bar.] 
from [{}] to [{1.value=1, 2.value=2}]"); + } + } + + public static class ComplexType { + + final String foo; + + public ComplexType(String foo) { + this.foo = foo; + } + } + + public static class Composite { + + private Integer b; + private Integer a; + + public void set(Integer a, Integer b) { + this.a = a; + this.b = b; + } + } + + + public void testComposite() { + Composite c = new Composite(); + Setting a = Setting.intSetting("foo.int.bar.a", 1, true, Setting.Scope.Cluster); + Setting b = Setting.intSetting("foo.int.bar.b", 1, true, Setting.Scope.Cluster); + ClusterSettingsService.SettingUpdater settingUpdater = Setting.compoundUpdater(c::set, a, b, logger, Settings.EMPTY); + assertFalse(settingUpdater.prepareApply(Settings.EMPTY)); + settingUpdater.apply(); + assertNull(c.a); + assertNull(c.b); + + Settings build = Settings.builder().put("foo.int.bar.a", 2).build(); + assertTrue(settingUpdater.prepareApply(build)); + settingUpdater.apply(); + assertEquals(2, c.a.intValue()); + assertNull(c.b); + + Integer aValue = c.a; + assertFalse(settingUpdater.prepareApply(build)); + settingUpdater.apply(); + assertSame(aValue, c.a); + + build = Settings.builder().put("foo.int.bar.a", 2).put("foo.int.bar.b", 5).build(); + assertTrue(settingUpdater.prepareApply(build)); + settingUpdater.apply(); + assertEquals(2, c.a.intValue()); + assertEquals(5, c.b.intValue()); + + // reset to default + assertTrue(settingUpdater.prepareApply(Settings.EMPTY)); + settingUpdater.apply(); + assertEquals(1, c.a.intValue()); + assertEquals(1, c.b.intValue()); + + } + + + + + + +} diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsServiceTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsServiceTests.java new file mode 100644 index 00000000000..254c310daba --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsServiceTests.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.settings; + +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.atomic.AtomicInteger; + +public class SettingsServiceTests extends ESTestCase { + + public void testAddConsumer() { + Setting testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster); + Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.Cluster); + SettingsService service = new SettingsService(Settings.EMPTY) { + @Override + protected Setting getSetting(String key) { + if (key.equals(testSetting.getKey())) { + return testSetting; + } + return null; + } + }; + + AtomicInteger consumer = new AtomicInteger(); + service.addSettingsUpdateConsumer(testSetting, consumer::set); + AtomicInteger consumer2 = new AtomicInteger(); + try { + service.addSettingsUpdateConsumer(testSetting2, consumer2::set); + fail("setting not registered"); + } catch (IllegalArgumentException ex) { + assertEquals("Setting is not registered for key [foo.bar.baz]", ex.getMessage()); + } + + + try { + service.addSettingsUpdateConsumer(testSetting, testSetting2, (a, b) -> {consumer.set(a); consumer2.set(b);}); + fail("setting not registered"); + } catch (IllegalArgumentException ex) { + assertEquals("Setting is not 
registered for key [foo.bar.baz]", ex.getMessage()); + } + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + service.applySettings(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", 15).build()); + assertEquals(2, consumer.get()); + assertEquals(0, consumer2.get()); + } + + public void testApply() { + Setting testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster); + Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.Cluster); + SettingsService service = new SettingsService(Settings.EMPTY) { + @Override + protected Setting getSetting(String key) { + if (key.equals(testSetting.getKey())) { + return testSetting; + } else if (key.equals(testSetting2.getKey())) { + return testSetting2; + } + return null; + } + }; + + AtomicInteger consumer = new AtomicInteger(); + service.addSettingsUpdateConsumer(testSetting, consumer::set); + AtomicInteger consumer2 = new AtomicInteger(); + service.addSettingsUpdateConsumer(testSetting2, consumer2::set, (s) -> s > 0); + + AtomicInteger aC = new AtomicInteger(); + AtomicInteger bC = new AtomicInteger(); + service.addSettingsUpdateConsumer(testSetting, testSetting2, (a, b) -> {aC.set(a); bC.set(b);}); + + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + assertEquals(0, aC.get()); + assertEquals(0, bC.get()); + try { + service.applySettings(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", -15).build()); + fail("invalid value"); + } catch (IllegalArgumentException ex) { + assertEquals("illegal value can't update [foo.bar.baz] from [1] to [-15]", ex.getMessage()); + } + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + assertEquals(0, aC.get()); + assertEquals(0, bC.get()); + try { + service.dryRun(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", -15).build()); + fail("invalid value"); + } catch (IllegalArgumentException ex) { + assertEquals("illegal value can't update [foo.bar.baz] from [1] to [-15]", 
ex.getMessage()); + } + + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + assertEquals(0, aC.get()); + assertEquals(0, bC.get()); + service.dryRun(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", 15).build()); + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + assertEquals(0, aC.get()); + assertEquals(0, bC.get()); + + service.applySettings(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", 15).build()); + assertEquals(2, consumer.get()); + assertEquals(15, consumer2.get()); + assertEquals(2, aC.get()); + assertEquals(15, bC.get()); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java index ec0e26091df..4220973ae21 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java @@ -162,4 +162,14 @@ public class TimeValueTests extends ESTestCase { assertThat(e.getMessage(), containsString("Failed to parse")); } } + + public void testToStringRep() { + assertThat("-1", equalTo(new TimeValue(-1).getStringRep())); + assertThat("10ms", equalTo(new TimeValue(10, TimeUnit.MILLISECONDS).getStringRep())); + assertThat("1533ms", equalTo(new TimeValue(1533, TimeUnit.MILLISECONDS).getStringRep())); + assertThat("90s", equalTo(new TimeValue(90, TimeUnit.SECONDS).getStringRep())); + assertThat("90m", equalTo(new TimeValue(90, TimeUnit.MINUTES).getStringRep())); + assertThat("36h", equalTo(new TimeValue(36, TimeUnit.HOURS).getStringRep())); + assertThat("1000d", equalTo(new TimeValue(1000, TimeUnit.DAYS).getStringRep())); + } } diff --git a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 7d36c09ee19..f3783fa6d12 100644 --- a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -21,13 +21,14 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.BytesRef; import org.elasticsearch.cache.recycler.PageCacheRecycler; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESSingleNodeTestCase; import org.junit.Before; @@ -336,9 +337,9 @@ public class BigArraysTests extends ESSingleNodeTestCase { for (String type : Arrays.asList("Byte", "Int", "Long", "Float", "Double", "Object")) { HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService( Settings.builder() - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, size - 1, ByteSizeUnit.BYTES) + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), size - 1, ByteSizeUnit.BYTES) .build(), - new NodeSettingsService(Settings.EMPTY)); + new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); try { @@ -356,9 +357,9 @@ public class BigArraysTests extends ESSingleNodeTestCase { final long maxSize = randomIntBetween(1 << 10, 1 << 22); HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService( Settings.builder() - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, maxSize, ByteSizeUnit.BYTES) + 
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), maxSize, ByteSizeUnit.BYTES) .build(), - new NodeSettingsService(Settings.EMPTY)); + new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); final int size = scaledRandomIntBetween(1, 20); diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index b14792a2c33..570c0b424ff 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -132,7 +132,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // for hitting simulated network failures quickly .put(FaultDetection.SETTING_PING_RETRIES, "1") // for hitting simulated network failures quickly .put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out - .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly .put("http.enabled", false) // just to make test quicker .put("gateway.local.list_timeout", "10s") // still long to induce failures but to long so test won't time out .build(); @@ -150,7 +150,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // TODO: Rarely use default settings form some of these Settings nodeSettings = Settings.builder() .put(DEFAULT_SETTINGS) - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, minimumMasterNode) + 
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode) .build(); if (discoveryConfig == null) { @@ -217,7 +217,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { logger.info("--> reducing min master nodes to 2"); assertAcked(client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2)).get()); + .setTransientSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)).get()); String master = internalCluster().getMasterName(); String nonMaster = null; @@ -293,9 +293,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Wait until the master node sees al 3 nodes again. ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + networkPartition.expectedTimeToHeal().millis())); - logger.info("Verify no master block with {} set to {}", DiscoverySettings.NO_MASTER_BLOCK, "all"); + logger.info("Verify no master block with {} set to {}", DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all"); client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.NO_MASTER_BLOCK, "all")) + .setTransientSettings(Settings.builder().put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all")) .get(); networkPartition.startDisrupting(); @@ -863,7 +863,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { internalCluster().startNodesAsync(3, Settings.builder() .put(DiscoveryService.SETTING_INITIAL_STATE_TIMEOUT, "1ms") - .put(DiscoverySettings.PUBLISH_TIMEOUT, "3s") + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "3s") .build()).get(); logger.info("applying disruption while cluster is forming ..."); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java 
b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index ea590756a8b..2dac067fed3 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.transport.LocalTransportAddress; @@ -38,7 +39,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.membership.MembershipAction; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -67,7 +68,7 @@ public class NodeJoinControllerTests extends ESTestCase { // make sure we have a master clusterService.setState(ClusterState.builder(clusterService.state()).nodes(DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.id()))); nodeJoinController = new NodeJoinController(clusterService, new NoopRoutingService(Settings.EMPTY), - new DiscoverySettings(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)), Settings.EMPTY); + new DiscoverySettings(Settings.EMPTY, new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))), Settings.EMPTY); } 
public void testSimpleJoinAccumulation() throws InterruptedException, ExecutionException { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index 0b5f9997dba..3b6708630ee 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -84,7 +84,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase { assertThat(zenDiscovery.isRejoinOnMasterGone(), is(true)); client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(ZenDiscovery.SETTING_REJOIN_ON_MASTER_GONE, false)) + .setTransientSettings(Settings.builder().put(ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING.getKey(), false)) .get(); assertThat(zenDiscovery.isRejoinOnMasterGone(), is(false)); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index d33aadd84aa..3cb5316cacc 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; @@ -42,7 +43,7 @@ import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import 
org.elasticsearch.node.service.NodeService; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; @@ -156,7 +157,7 @@ public class PublishClusterStateActionTests extends ESTestCase { public MockNode createMockNode(String name, Settings settings, Version version, @Nullable ClusterStateListener listener) throws Exception { settings = Settings.builder() .put("name", name) - .put(TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") .put(settings) .build(); @@ -237,7 +238,7 @@ public class PublishClusterStateActionTests extends ESTestCase { protected MockPublishAction buildPublishClusterStateAction(Settings settings, MockTransportService transportService, DiscoveryNodesProvider nodesProvider, PublishClusterStateAction.NewPendingClusterStateListener listener) { - DiscoverySettings discoverySettings = new DiscoverySettings(settings, new NodeSettingsService(settings)); + DiscoverySettings discoverySettings = new DiscoverySettings(settings, new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); return new MockPublishAction(settings, transportService, nodesProvider, listener, discoverySettings, ClusterName.DEFAULT); } @@ -345,7 +346,7 @@ public class PublishClusterStateActionTests extends ESTestCase { } public void testDisablingDiffPublishing() throws Exception { - Settings noDiffPublishingSettings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE, false).build(); + Settings noDiffPublishingSettings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build(); MockNode 
nodeA = createMockNode("nodeA", noDiffPublishingSettings, Version.CURRENT, new ClusterStateListener() { @Override @@ -384,7 +385,7 @@ public class PublishClusterStateActionTests extends ESTestCase { public void testSimultaneousClusterStatePublishing() throws Exception { int numberOfNodes = randomIntBetween(2, 10); int numberOfIterations = scaledRandomIntBetween(5, 50); - Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE, randomBoolean()).build(); + Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), randomBoolean()).build(); MockNode master = createMockNode("node0", settings, Version.CURRENT, new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { @@ -510,8 +511,8 @@ public class PublishClusterStateActionTests extends ESTestCase { final boolean expectingToCommit = randomBoolean(); Settings.Builder settings = Settings.builder(); // make sure we have a reasonable timeout if we expect to timeout, o.w. one that will make the test "hang" - settings.put(DiscoverySettings.COMMIT_TIMEOUT, expectingToCommit == false && timeOutNodes > 0 ? "100ms" : "1h") - .put(DiscoverySettings.PUBLISH_TIMEOUT, "5ms"); // test is about committing + settings.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), expectingToCommit == false && timeOutNodes > 0 ? 
"100ms" : "1h") + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "5ms"); // test is about committing MockNode master = createMockNode("master", settings.build()); @@ -695,7 +696,7 @@ public class PublishClusterStateActionTests extends ESTestCase { */ public void testTimeoutOrCommit() throws Exception { Settings settings = Settings.builder() - .put(DiscoverySettings.COMMIT_TIMEOUT, "1ms").build(); // short but so we will sometime commit sometime timeout + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "1ms").build(); // short but so we will sometime commit sometime timeout MockNode master = createMockNode("master", settings); MockNode node = createMockNode("node", settings); diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index 15ddc9dd771..2c6a55da242 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -57,7 +57,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase { //ridiculous settings to make sure we don't run into uninitialized because fo default AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 100) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) .build()); @@ -111,7 +111,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase { //ridiculous settings to make sure we don't run into uninitialized because fo default AllocationService strategy = createAllocationService(settingsBuilder() 
.put("cluster.routing.allocation.concurrent_recoveries", 100) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) .build()); diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java index 2184fda47c4..dbdf747de63 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java @@ -82,9 +82,9 @@ public class RecoveryBackwardsCompatibilityIT extends ESBackcompatTestCase { SearchResponse countResponse = client().prepareSearch().setSize(0).get(); assertHitCount(countResponse, numDocs); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none")).execute().actionGet(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")).execute().actionGet(); backwardsCluster().upgradeAllNodes(); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all")).execute().actionGet(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all")).execute().actionGet(); ensureGreen(); countResponse = client().prepareSearch().setSize(0).get(); diff --git 
a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 01c76b465a9..31e992afb62 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -360,7 +360,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings() .setTransientSettings(settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE)) + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)) .get(); logger.info("--> full cluster restart"); internalCluster().fullRestart(); @@ -377,7 +377,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings() .setTransientSettings(settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE)) + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)) .get(); logger.info("--> full cluster restart"); internalCluster().fullRestart(); diff --git a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java index 21ecdf710b7..457cf31ec83 100644 --- a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java +++ b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java @@ -56,7 +56,7 @@ public class TransportIndexFailuresIT extends ESIntegTestCase { .put("discovery.type", "zen") // <-- To override the local setting if set externally .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // <-- for 
hitting simulated network failures quickly .put(FaultDetection.SETTING_PING_RETRIES, "1") // <-- for hitting simulated network failures quickly - .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly .put("discovery.zen.minimum_master_nodes", 1) .build(); diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 4858e0a6e3c..15249a49727 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -111,9 +111,9 @@ public class CorruptedFileIT extends ESIntegTestCase { // and we need to make sure primaries are not just trashed if we don't have replicas .put(super.nodeSettings(nodeOrdinal)) // speed up recoveries - .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, 10) - .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 10) - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 5) + .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), 10) + .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), 10) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 5) .build(); } diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java index 8de3af25827..4f6aaf25705 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java @@ -170,14 +170,14 @@ public class IndicesLifecycleListenerIT extends ESIntegTestCase { //add a node: 3 out 
of the 6 shards will be relocated to it //disable allocation before starting a new node, as we need to register the listener first assertAcked(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none"))); + .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); String node2 = internalCluster().startNode(); IndexShardStateChangeListener stateChangeListenerNode2 = new IndexShardStateChangeListener(); //add a listener that keeps track of the shard state changes internalCluster().getInstance(MockIndexEventListener.TestEventListener.class, node2).setNewDelegate(stateChangeListenerNode2); //re-enable allocation assertAcked(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all"))); + .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all"))); ensureGreen(); //the 3 relocated shards get closed on the first node diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java index 3398839b905..08df60126a5 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java @@ -43,10 +43,10 @@ public class CircuitBreakerNoopIT extends ESIntegTestCase { return Settings.builder() .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, "noop") // This is set low, because if the "noop" is not a noop, it will break - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "10b") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") 
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, "noop") // This is set low, because if the "noop" is not a noop, it will break - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "10b") + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") .build(); } diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java index fcd94d99585..1af04e295dd 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -63,13 +63,13 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { private void reset() { logger.info("--> resetting breaker settings"); Settings resetSettings = settingsBuilder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.DEFAULT_FIELDDATA_BREAKER_LIMIT) - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, - HierarchyCircuitBreakerService.DEFAULT_FIELDDATA_OVERHEAD_CONSTANT) - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.DEFAULT_REQUEST_BREAKER_LIMIT) - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0) + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getDefault(null)) + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getDefault(null)) + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), + 
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getDefault(null)) + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings)); } @@ -119,8 +119,8 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { // Update circuit breaker settings Settings settings = settingsBuilder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "100b") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05) + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.05) .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -168,8 +168,8 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { // Update circuit breaker settings Settings settings = settingsBuilder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "100b") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05) + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.05) .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -213,8 +213,8 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { .getNodes()[0].getBreaker().getStats(CircuitBreaker.REQUEST).getLimit(); Settings resetSettings = settingsBuilder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "10b") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0) + 
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings)); @@ -234,9 +234,9 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { // Adjust settings so the parent breaker will fail, but the fielddata breaker doesn't resetSettings = settingsBuilder() - .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, "15b") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "90%") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0) + .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "15b") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "90%") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) .build(); client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings).execute().actionGet(); @@ -261,7 +261,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { // Make request breaker limited to a small amount Settings resetSettings = settingsBuilder() - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "10b") + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings)); diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java index 741ea305254..38da1ac135c 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java +++ 
b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java @@ -19,12 +19,13 @@ package org.elasticsearch.indices.memory.breaker; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.equalTo; @@ -66,7 +67,7 @@ public class CircuitBreakerUnitTests extends ESTestCase { } public void testRegisterCustomBreaker() throws Exception { - CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)); + CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); String customName = "custom"; BreakerSettings settings = new BreakerSettings(customName, 20, 1.0); service.registerBreaker(settings); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 5ece413f796..50346a2903c 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -142,8 +142,8 @@ public class IndexRecoveryIT extends ESIntegTestCase { assertTrue(client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() // one chunk per sec.. 
- .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, chunkSize, ByteSizeUnit.BYTES) - .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, chunkSize, ByteSizeUnit.BYTES) + .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES) + .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES) ) .get().isAcknowledged()); } @@ -151,8 +151,8 @@ public class IndexRecoveryIT extends ESIntegTestCase { private void restoreRecoverySpeed() { assertTrue(client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() - .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, "20mb") - .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, "512kb") + .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20mb") + .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING.getKey(), "512kb") ) .get().isAcknowledged()); } @@ -525,8 +525,8 @@ public class IndexRecoveryIT extends ESIntegTestCase { public void testDisconnectsWhileRecovering() throws Exception { final String indexName = "test"; final Settings nodeSettings = Settings.builder() - .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, "100ms") - .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, "1s") + .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), "100ms") + .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), "1s") .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) // restarted recoveries will delete temp files and write them again .build(); // start a master node diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 8346003287c..ca70fc1fe2f 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java 
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.settings.Settings; @@ -44,7 +45,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.CorruptionUtils; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; @@ -60,7 +61,7 @@ import java.util.concurrent.atomic.AtomicBoolean; public class RecoverySourceHandlerTests extends ESTestCase { private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings(new Index("index"), Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).build()); private final ShardId shardId = new ShardId(INDEX_SETTINGS.getIndex(), 1); - private final NodeSettingsService service = new NodeSettingsService(Settings.EMPTY); + private final ClusterSettingsService service = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); public void testSendFiles() throws Throwable { Settings settings = Settings.builder().put("indices.recovery.concurrent_streams", 1). 
diff --git a/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java index bde40aa928f..8ec629dbbdc 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -40,7 +40,7 @@ public class CloseIndexDisableCloseAllIT extends ESIntegTestCase { // The cluster scope is test b/c we can't clear cluster settings. public void testCloseAllRequiresName() { Settings clusterSettings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, true) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(clusterSettings)); createIndex("test1", "test2", "test3"); @@ -91,7 +91,7 @@ public class CloseIndexDisableCloseAllIT extends ESIntegTestCase { createIndex("test_no_close"); healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(TransportCloseIndexAction.SETTING_CLUSTER_INDICES_CLOSE_ENABLE, false)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey(), false)).get(); try { client.admin().indices().prepareClose("test_no_close").execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 96611aeca8a..2e73a466677 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ 
-171,7 +171,7 @@ public class RareClusterStateIT extends ESIntegTestCase { ensureGreen("test"); // now that the cluster is stable, remove publishing timeout - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0"))); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0"))); Set nodes = new HashSet<>(Arrays.asList(internalCluster().getNodeNames())); nodes.remove(internalCluster().getMasterName()); @@ -200,7 +200,7 @@ public class RareClusterStateIT extends ESIntegTestCase { // but the change might not be on the node that performed the indexing // operation yet - Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0ms").build(); + Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0ms").build(); final List nodeNames = internalCluster().startNodesAsync(2, settings).get(); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index fc4dd4f6487..948b76b963d 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -303,7 +303,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // disable allocation to control the situation more easily assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); logger.debug("--> shutting down two 
random nodes"); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3)); @@ -322,7 +322,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { .put(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "_name", "NONE"))); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all"))); logger.debug("--> waiting for shards to recover on [{}]", node4); // we have to do this in two steps as we now do async shard fetching before assigning, so the change to the @@ -340,7 +340,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // disable allocation again to control concurrency a bit and allow shard active to kick in before allocation assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); logger.debug("--> starting the two old nodes back"); @@ -351,7 +351,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all"))); logger.debug("--> waiting for the lost shard to be recovered"); @@ -396,7 +396,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // disable relocations when we do this, to make sure the shards are not relocated from node2 // due to rebalancing, and delete its content - 
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)).get(); internalCluster().getInstance(ClusterService.class, nonMasterNode).submitStateUpdateTask("test", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public ClusterState execute(ClusterState currentState) throws Exception { diff --git a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java index 49d22b87bf8..514b1757e41 100644 --- a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java @@ -34,7 +34,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { // The cluster scope is test b/c we can't clear cluster settings. 
public void testDestructiveOperations() throws Exception { Settings settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, true) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -58,7 +58,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { } settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, false) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -68,7 +68,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { // end delete index: // close index: settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, true) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -100,7 +100,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { } settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, false) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); assertAcked(client().admin().indices().prepareClose("_all").get()); diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java index b0288042ecc..8d659c13e13 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java @@ -32,74 +32,74 @@ public class RecoverySettingsTests extends ESSingleNodeTestCase { } public void testAllSettingsAreDynamicallyUpdatable() { - 
innerTestSettings(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.fileChunkSize().bytesAsInt()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS, randomIntBetween(1, 200), new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS_SETTING.getKey(), randomIntBetween(1, 200), new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.translogOps()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE_SETTING.getKey(), randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.translogSize().bytesAsInt()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, randomIntBetween(1, 200), new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), randomIntBetween(1, 200), new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.concurrentStreamPool().getMaximumPoolSize()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, randomIntBetween(1, 200), new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), randomIntBetween(1, 200), new 
Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.concurrentSmallFileStreamPool().getMaximumPoolSize()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, 0, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), 0, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(null, recoverySettings.rateLimiter()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.retryDelayStateSync().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.retryDelayNetwork().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.activityTimeout().millis()); } }); - 
innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.internalActionTimeout().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.internalActionLongTimeout().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_COMPRESS, false, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_COMPRESS_SETTING.getKey(), false, new Validator() { @Override public void validate(RecoverySettings recoverySettings, boolean expectedValue) { assertEquals(expectedValue, recoverySettings.compress()); diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index 57b5e888ea9..541911ce4e0 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -387,7 +387,7 @@ public class RelocationIT extends ESIntegTestCase { logger.info("--> stopping replica assignment"); assertAcked(client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none"))); + 
.setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); logger.info("--> wait for all replica shards to be removed, on all nodes"); assertBusy(new Runnable() { diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index d94f72ea80f..4f85eb77340 100644 --- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -62,7 +62,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)); + .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)); return builder.build(); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 51ae038ca0d..8fde9bbf330 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -56,7 +56,7 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { return settingsBuilder().put(super.nodeSettings(nodeOrdinal)) // Rebalancing is causing some checks after restore to randomly fail // due to https://github.com/elastic/elasticsearch/issues/9421 - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE) + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) 
.build(); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index f9392836d8b..2c38c4c74c1 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -123,14 +123,14 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest logger.info("--> set test persistent setting"); client.admin().cluster().prepareUpdateSettings().setPersistentSettings( Settings.settingsBuilder() - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2) - .put(IndicesTTLService.INDICES_TTL_INTERVAL, random, TimeUnit.MINUTES)) + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) + .put(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), random, TimeUnit.MINUTES)) .execute().actionGet(); assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(random).millis())); + .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(random).millis())); assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1), equalTo(2)); + .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), -1), equalTo(2)); logger.info("--> create repository"); PutRepositoryResponse putRepositoryResponse = 
client.admin().cluster().preparePutRepository("test-repo") @@ -146,23 +146,25 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest logger.info("--> clean the test persistent setting"); client.admin().cluster().prepareUpdateSettings().setPersistentSettings( Settings.settingsBuilder() - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 1) - .put(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1))) + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 1) + .put(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(1))) .execute().actionGet(); assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(1).millis())); + .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(1).millis())); stopNode(secondNode); assertThat(client.admin().cluster().prepareHealth().setWaitForNodes("1").get().isTimedOut(), equalTo(false)); logger.info("--> restore snapshot"); - client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).execute().actionGet(); - assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(random).millis())); - + try { + client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).execute().actionGet(); + fail("can't restore minimum master 
nodes"); + } catch (IllegalArgumentException ex) { + assertEquals("cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [1]", ex.getMessage()); + } logger.info("--> ensure that zen discovery minimum master nodes wasn't restored"); assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1), not(equalTo(2))); + .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), -1), not(equalTo(2))); } public void testRestoreCustomMetadata() throws Exception { @@ -554,7 +556,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest public void testRestoreIndexWithShardsMissingInLocalGateway() throws Exception { logger.info("--> start 2 nodes"); Settings nodeSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE) + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) .build(); internalCluster().startNode(nodeSettings); diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTypeSettingsValidatorTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTypeSettingsValidatorTests.java deleted file mode 100644 index 3dfca5cb283..00000000000 --- a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTypeSettingsValidatorTests.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership.
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.threadpool; - -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.settings.Validator; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.util.*; - -import static org.junit.Assert.*; - -public class ThreadPoolTypeSettingsValidatorTests extends ESTestCase { - private Validator validator; - - @Before - public void setUp() throws Exception { - super.setUp(); - validator = ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR; - } - - public void testValidThreadPoolTypeSettings() { - for (Map.Entry entry : ThreadPool.THREAD_POOL_TYPES.entrySet()) { - assertNull(validateSetting(validator, entry.getKey(), entry.getValue().getType())); - } - } - - public void testInvalidThreadPoolTypeSettings() { - for (Map.Entry entry : ThreadPool.THREAD_POOL_TYPES.entrySet()) { - Set set = new HashSet<>(); - set.addAll(Arrays.asList(ThreadPool.ThreadPoolType.values())); - set.remove(entry.getValue()); - ThreadPool.ThreadPoolType invalidThreadPoolType = randomFrom(set.toArray(new ThreadPool.ThreadPoolType[set.size()])); - String expectedMessage = String.format( - Locale.ROOT, - "thread pool type for [%s] can only be updated to [%s] but was [%s]", - entry.getKey(), - entry.getValue().getType(), - invalidThreadPoolType.getType()); - String message = validateSetting(validator, entry.getKey(), 
invalidThreadPoolType.getType()); - assertNotNull(message); - assertEquals(expectedMessage, message); - } - } - - public void testNonThreadPoolTypeSetting() { - String setting = ThreadPool.THREADPOOL_GROUP + randomAsciiOfLength(10) + "foo"; - String value = randomAsciiOfLength(10); - assertNull(validator.validate(setting, value, ClusterState.PROTO)); - } - - private String validateSetting(Validator validator, String threadPoolName, String value) { - return validator.validate(ThreadPool.THREADPOOL_GROUP + threadPoolName + ".type", value, ClusterState.PROTO); - } -} diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index 95ceea1e490..47d82a36088 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -19,8 +19,10 @@ package org.elasticsearch.threadpool; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool.Names; @@ -90,9 +92,10 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { ThreadPool threadPool = null; try { threadPool = new ThreadPool(settingsBuilder().put("name", "testUpdateSettingsCanNotChangeThreadPoolType").build()); + ClusterSettingsService clusterSettingsService = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + threadPool.setNodeSettingsService(clusterSettingsService); - - threadPool.updateSettings( + clusterSettingsService.applySettings( settingsBuilder() .put("threadpool." 
+ threadPoolName + ".type", invalidThreadPoolType.getType()) .build() @@ -111,14 +114,16 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.CACHED); ThreadPool threadPool = null; try { - threadPool = new ThreadPool( - Settings.settingsBuilder() - .put("name", "testCachedExecutorType").build()); + Settings nodeSettings = Settings.settingsBuilder() + .put("name", "testCachedExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettingsService clusterSettingsService = new ClusterSettingsService(nodeSettings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + threadPool.setNodeSettingsService(clusterSettingsService); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - threadPool.updateSettings(settingsBuilder() + Settings settings = clusterSettingsService.applySettings(settingsBuilder() .put("threadpool." + threadPoolName + ".keep_alive", "10m") .build()); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED); @@ -134,7 +139,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change keep alive Executor oldExecutor = threadPool.executor(threadPoolName); - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); + settings = clusterSettingsService.applySettings(settingsBuilder().put(settings).put("threadpool." 
+ threadPoolName + ".keep_alive", "1m").build()); // Make sure keep alive value changed assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L)); assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L)); @@ -143,7 +148,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor)); // Set the same keep alive - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); + settings = clusterSettingsService.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); // Make sure keep alive value didn't change assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L)); assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L)); @@ -160,11 +165,13 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { ThreadPool threadPool = null; try { - threadPool = new ThreadPool(settingsBuilder() - .put("name", "testCachedExecutorType").build()); + Settings nodeSettings = Settings.settingsBuilder() + .put("name", "testFixedExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettingsService clusterSettingsService = new ClusterSettingsService(nodeSettings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + threadPool.setNodeSettingsService(clusterSettingsService); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - - threadPool.updateSettings(settingsBuilder() + Settings settings = clusterSettingsService.applySettings(settingsBuilder() .put("threadpool." 
+ threadPoolName + ".size", "15") .build()); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); @@ -177,7 +184,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L)); // Put old type back - threadPool.updateSettings(Settings.EMPTY); + settings = clusterSettingsService.applySettings(Settings.EMPTY); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); // Make sure keep alive value is not used assertThat(info(threadPool, threadPoolName).getKeepAlive(), nullValue()); @@ -190,7 +197,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change size Executor oldExecutor = threadPool.executor(threadPoolName); - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".size", "10").build()); + settings = clusterSettingsService.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".size", "10").build()); // Make sure size values changed assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10)); assertThat(info(threadPool, threadPoolName).getMin(), equalTo(10)); @@ -201,8 +208,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor)); // Change queue capacity - threadPool.updateSettings(settingsBuilder() - .put("threadpool." + threadPoolName + ".queue", "500") + settings = clusterSettingsService.applySettings(settingsBuilder().put(settings).put("threadpool." 
+ threadPoolName + ".queue", "500") .build()); } finally { terminateThreadPoolIfNeeded(threadPool); @@ -213,9 +219,12 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); ThreadPool threadPool = null; try { - threadPool = new ThreadPool(settingsBuilder() + Settings nodeSettings = settingsBuilder() .put("threadpool." + threadPoolName + ".size", 10) - .put("name", "testCachedExecutorType").build()); + .put("name", "testScalingExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettingsService clusterSettingsService = new ClusterSettingsService(nodeSettings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + threadPool.setNodeSettingsService(clusterSettingsService); assertThat(info(threadPool, threadPoolName).getMin(), equalTo(1)); assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10)); assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(5L)); @@ -224,7 +233,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change settings that doesn't require pool replacement Executor oldExecutor = threadPool.executor(threadPoolName); - threadPool.updateSettings(settingsBuilder() + clusterSettingsService.applySettings(settingsBuilder() .put("threadpool." + threadPoolName + ".keep_alive", "10m") .put("threadpool." + threadPoolName + ".min", "2") .put("threadpool." + threadPoolName + ".size", "15") @@ -248,9 +257,12 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED); ThreadPool threadPool = null; try { - threadPool = new ThreadPool(Settings.settingsBuilder() + Settings nodeSettings = Settings.settingsBuilder() .put("threadpool." 
+ threadPoolName + ".queue_size", 1000) - .put("name", "testCachedExecutorType").build()); + .put("name", "testCachedExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettingsService clusterSettingsService = new ClusterSettingsService(nodeSettings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + threadPool.setNodeSettingsService(clusterSettingsService); assertEquals(info(threadPool, threadPoolName).getQueueSize().getSingles(), 1000L); final CountDownLatch latch = new CountDownLatch(1); @@ -264,7 +276,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { } } ); - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".queue_size", 2000).build()); + clusterSettingsService.applySettings(settingsBuilder().put("threadpool." + threadPoolName + ".queue_size", 2000).build()); assertThat(threadPool.executor(threadPoolName), not(sameInstance(oldExecutor))); assertThat(oldExecutor.isShutdown(), equalTo(true)); assertThat(oldExecutor.isTerminating(), equalTo(true)); @@ -279,12 +291,15 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { public void testCustomThreadPool() throws Exception { ThreadPool threadPool = null; try { - threadPool = new ThreadPool(Settings.settingsBuilder() + Settings nodeSettings = Settings.settingsBuilder() .put("threadpool.my_pool1.type", "scaling") .put("threadpool.my_pool2.type", "fixed") .put("threadpool.my_pool2.size", "1") .put("threadpool.my_pool2.queue_size", "1") - .put("name", "testCustomThreadPool").build()); + .put("name", "testCustomThreadPool").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettingsService clusterSettingsService = new ClusterSettingsService(nodeSettings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + threadPool.setNodeSettingsService(clusterSettingsService); ThreadPoolInfo groups = threadPool.info(); boolean foundPool1 = false; boolean foundPool2 = false; @@ -316,7 +331,7 @@ public 
class UpdateThreadPoolSettingsTests extends ESTestCase { Settings settings = Settings.builder() .put("threadpool.my_pool2.size", "10") .build(); - threadPool.updateSettings(settings); + clusterSettingsService.applySettings(settings); groups = threadPool.info(); foundPool1 = false; diff --git a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index becb61666da..f7a9c221f7c 100644 --- a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -21,12 +21,14 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; @@ -69,12 +71,12 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { super.setUp(); threadPool = new ThreadPool(getClass().getName()); serviceA = build( - Settings.builder().put("name", "TS_A", TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(), + Settings.builder().put("name", "TS_A", TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING").build(), version0, new NamedWriteableRegistry() ); 
nodeA = new DiscoveryNode("TS_A", "TS_A", serviceA.boundAddress().publishAddress(), emptyMap(), version0); serviceB = build( - Settings.builder().put("name", "TS_B", TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(), + Settings.builder().put("name", "TS_B", TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING").build(), version1, new NamedWriteableRegistry() ); nodeB = new DiscoveryNode("TS_B", "TS_B", serviceB.boundAddress().publishAddress(), emptyMap(), version1); @@ -650,9 +652,10 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { includeSettings = "test"; excludeSettings = "DOESN'T_MATCH"; } - - serviceA.applySettings(Settings.builder() - .put(TransportService.SETTING_TRACE_LOG_INCLUDE, includeSettings, TransportService.SETTING_TRACE_LOG_EXCLUDE, excludeSettings) + ClusterSettingsService service = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + serviceA.setDynamicSettings(service); + service.applySettings(Settings.builder() + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), includeSettings, TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), excludeSettings) .build()); tracer.reset(4); diff --git a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java index 3f140b388fd..ca3e4a21604 100644 --- a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java +++ b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; import org.elasticsearch.cache.recycler.MockPageCacheRecycler; +import org.elasticsearch.common.settings.ClusterSettings; import 
org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; @@ -28,7 +29,7 @@ import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.netty.NettyTransport; @@ -64,7 +65,7 @@ public class NettySizeHeaderFrameDecoderTests extends ESTestCase { @Before public void startThreadPool() { threadPool = new ThreadPool(settings); - threadPool.setNodeSettingsService(new NodeSettingsService(settings)); + threadPool.setNodeSettingsService(new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); NetworkService networkService = new NetworkService(settings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, Version.CURRENT, new NamedWriteableRegistry()); diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index 36b20b09fc1..59b57fca7de 100755 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -27,7 +27,7 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import 
org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -40,9 +40,9 @@ public class AzureDiscovery extends ZenDiscovery { @Inject public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, + ClusterService clusterService, ClusterSettingsService clusterSettingsService, ZenPingService pingService, DiscoverySettings discoverySettings, ElectMasterService electMasterService) { - super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, + super(settings, clusterName, threadPool, transportService, clusterService, clusterSettingsService, pingService, electMasterService, discoverySettings); } } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java index e94b7618d12..e0d15a584bd 100755 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java @@ -27,7 +27,7 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -40,10 +40,10 @@ 
public class Ec2Discovery extends ZenDiscovery { @Inject public Ec2Discovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, + ClusterService clusterService, ClusterSettingsService clusterSettingsService, ZenPingService pingService, DiscoverySettings discoverySettings, ElectMasterService electMasterService) { - super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, + super(settings, clusterName, threadPool, transportService, clusterService, clusterSettingsService, pingService, electMasterService, discoverySettings); } } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java index f20d1c74f83..e004230b735 100755 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java @@ -27,7 +27,7 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -40,10 +40,10 @@ public class GceDiscovery extends ZenDiscovery { @Inject public GceDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, + ClusterService clusterService, ClusterSettingsService clusterSettingsService, 
ZenPingService pingService, DiscoverySettings discoverySettings, ElectMasterService electMasterService) { - super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, + super(settings, clusterName, threadPool, transportService, clusterService, clusterSettingsService, pingService, electMasterService, discoverySettings); } } diff --git a/test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index 6ac2101fe52..b105d269163 100644 --- a/test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -31,8 +31,9 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.monitor.fs.FsInfo; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.threadpool.ThreadPool; @@ -77,11 +78,11 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService { } @Inject - public MockInternalClusterInfoService(Settings settings, NodeSettingsService nodeSettingsService, + public MockInternalClusterInfoService(Settings settings, ClusterSettingsService clusterSettingsService, TransportNodesStatsAction transportNodesStatsAction, TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService, ThreadPool threadPool) { - super(settings, nodeSettingsService, transportNodesStatsAction, transportIndicesStatsAction, clusterService, threadPool); + super(settings, clusterSettingsService, 
transportNodesStatsAction, transportIndicesStatsAction, clusterService, threadPool); this.clusterName = ClusterName.clusterNameFromSettings(settings); stats[0] = makeStats("node_t1", new DiskUsage("node_t1", "n1", "/dev/null", 100, 100)); stats[1] = makeStats("node_t2", new DiskUsage("node_t2", "n2", "/dev/null", 100, 100)); @@ -133,4 +134,9 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService { return "/dev/null"; } } + + @Override + public void setUpdateFrequency(TimeValue updateFrequency) { + super.setUpdateFrequency(updateFrequency); + } } diff --git a/test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java index 9a7a3efa3dc..09a0ab9b6d3 100644 --- a/test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java +++ b/test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.cache.request.IndicesRequestCache; -import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.dfs.DfsPhase; @@ -67,10 +67,10 @@ public class MockSearchService extends SearchService { } @Inject - public MockSearchService(Settings settings, NodeSettingsService nodeSettingsService, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, - ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, - DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) { - super(settings, nodeSettingsService, clusterService, indicesService, 
indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase, + public MockSearchService(Settings settings, ClusterSettingsService clusterSettingsService, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, + ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, + DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) { + super(settings, clusterSettingsService, clusterService, indicesService, indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase, queryPhase, fetchPhase, indicesQueryCache); } diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java index c4f4b196739..e4d3a3c42a5 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -43,7 +44,7 @@ import org.elasticsearch.gateway.AsyncShardFetch; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.ReplicaShardAllocator; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; -import org.elasticsearch.node.settings.NodeSettingsService; +import 
org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.gateway.NoopGatewayAllocator; import java.lang.reflect.Constructor; @@ -67,37 +68,37 @@ public abstract class ESAllocationTestCase extends ESTestCase { } public static MockAllocationService createAllocationService(Settings settings, Random random) { - return createAllocationService(settings, new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), random); + return createAllocationService(settings, new ClusterSettingsService(Settings.Builder.EMPTY_SETTINGS, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), random); } - public static MockAllocationService createAllocationService(Settings settings, NodeSettingsService nodeSettingsService, Random random) { + public static MockAllocationService createAllocationService(Settings settings, ClusterSettingsService clusterSettingsService, Random random) { return new MockAllocationService(settings, - randomAllocationDeciders(settings, nodeSettingsService, random), + randomAllocationDeciders(settings, clusterSettingsService, random), new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), EmptyClusterInfoService.INSTANCE); } public static MockAllocationService createAllocationService(Settings settings, ClusterInfoService clusterInfoService) { return new MockAllocationService(settings, - randomAllocationDeciders(settings, new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), getRandom()), + randomAllocationDeciders(settings, new ClusterSettingsService(Settings.Builder.EMPTY_SETTINGS, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), getRandom()), new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), clusterInfoService); } public static MockAllocationService createAllocationService(Settings settings, GatewayAllocator allocator) { return new MockAllocationService(settings, - randomAllocationDeciders(settings, new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), getRandom()), + 
randomAllocationDeciders(settings, new ClusterSettingsService(Settings.Builder.EMPTY_SETTINGS, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), getRandom()), new ShardsAllocators(settings, allocator), EmptyClusterInfoService.INSTANCE); } - public static AllocationDeciders randomAllocationDeciders(Settings settings, NodeSettingsService nodeSettingsService, Random random) { + public static AllocationDeciders randomAllocationDeciders(Settings settings, ClusterSettingsService clusterSettingsService, Random random) { final List> defaultAllocationDeciders = ClusterModule.DEFAULT_ALLOCATION_DECIDERS; final List list = new ArrayList<>(); for (Class deciderClass : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) { try { try { - Constructor constructor = deciderClass.getConstructor(Settings.class, NodeSettingsService.class); - list.add(constructor.newInstance(settings, nodeSettingsService)); + Constructor constructor = deciderClass.getConstructor(Settings.class, ClusterSettingsService.class); + list.add(constructor.newInstance(settings, clusterSettingsService)); } catch (NoSuchMethodException e) { Constructor constructor = null; constructor = deciderClass.getConstructor(Settings.class); diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java index 47e163a6291..dbd260491f6 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java @@ -136,7 +136,7 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase { if (globalCompatibilityVersion().before(Version.V_1_3_2)) { // if we test against nodes before 1.3.2 we disable all the compression due to a known bug // see #7210 - builder.put(RecoverySettings.INDICES_RECOVERY_COMPRESS, false); + builder.put(RecoverySettings.INDICES_RECOVERY_COMPRESS_SETTING.getKey(), false); } return 
builder; } @@ -255,7 +255,7 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase { // if we test against nodes before 1.3.2 we disable all the compression due to a known bug // see #7210 builder.put(Transport.TransportSettings.TRANSPORT_TCP_COMPRESS, false) - .put(RecoverySettings.INDICES_RECOVERY_COMPRESS, false); + .put(RecoverySettings.INDICES_RECOVERY_COMPRESS_SETTING.getKey(), false); } return builder.build(); } diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index cc5348bc6bc..24adcf960eb 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1039,7 +1039,7 @@ public abstract class ESIntegTestCase extends ESTestCase { */ public void setMinimumMasterNodes(int n) { assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings( - settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n)) + settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), n)) .get().isAcknowledged()); } @@ -1474,7 +1474,7 @@ public abstract class ESIntegTestCase extends ESTestCase { /** Sets or unsets the cluster read_only mode **/ public static void setClusterReadOnly(boolean value) { - Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, value).build(); + Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), value).build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get()); } @@ -1687,8 +1687,8 @@ public abstract class ESIntegTestCase extends ESTestCase { Settings.Builder builder = settingsBuilder() // Default the watermarks to absurdly low to prevent the tests // from failing on nodes without enough disk space - 
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "1b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "1b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") .put("script.indexed", "on") .put("script.inline", "on") // wait short time for other active shards before actually deleting, default 30s not needed in tests diff --git a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index b86c8689699..c8eacc4c1ae 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -292,19 +292,19 @@ public final class InternalTestCluster extends TestCluster { } // Default the watermarks to absurdly low to prevent the tests // from failing on nodes without enough disk space - builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "1b"); - builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "1b"); + builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b"); + builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b"); if (TEST_NIGHTLY) { - builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, RandomInts.randomIntBetween(random, 10, 15)); - builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RandomInts.randomIntBetween(random, 10, 15)); - builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, RandomInts.randomIntBetween(random, 5, 10)); + builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), 
RandomInts.randomIntBetween(random, 10, 15)); + builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), RandomInts.randomIntBetween(random, 10, 15)); + builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 5, 10)); } else if (random.nextInt(100) <= 90) { - builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, RandomInts.randomIntBetween(random, 3, 6)); - builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RandomInts.randomIntBetween(random, 3, 6)); - builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, RandomInts.randomIntBetween(random, 2, 5)); + builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), RandomInts.randomIntBetween(random, 3, 6)); + builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), RandomInts.randomIntBetween(random, 3, 6)); + builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 2, 5)); } // always reduce this - it can make tests really slow - builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 20, 50))); + builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 20, 50))); defaultSettings = builder.build(); executor = EsExecutors.newCached("test runner", 0, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory("test_" + clusterName)); } @@ -412,7 +412,7 @@ public final class InternalTestCluster extends TestCluster { } if (random.nextBoolean()) { - builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, new TimeValue(RandomInts.randomIntBetween(random, 10, 30), TimeUnit.SECONDS)); + 
builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), new TimeValue(RandomInts.randomIntBetween(random, 10, 30), TimeUnit.SECONDS)); } if (random.nextInt(10) == 0) { @@ -430,25 +430,25 @@ public final class InternalTestCluster extends TestCluster { if (random.nextBoolean()) { if (random.nextInt(10) == 0) { // do something crazy slow here - builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); + builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); } else { - builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); + builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); } } if (random.nextBoolean()) { - builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE, RandomPicks.randomFrom(random, StoreRateLimiting.Type.values())); + builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING.getKey(), RandomPicks.randomFrom(random, StoreRateLimiting.Type.values())); } if (random.nextBoolean()) { if (random.nextInt(10) == 0) { // do something crazy slow here - builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); + builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); } else { - builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); + builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new 
ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); } } if (random.nextBoolean()) { - builder.put(RecoverySettings.INDICES_RECOVERY_COMPRESS, random.nextBoolean()); + builder.put(RecoverySettings.INDICES_RECOVERY_COMPRESS_SETTING.getKey(), random.nextBoolean()); } if (random.nextBoolean()) { From 2e27ee393f04b95c9c51b1553cf91c0ae57e21af Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 7 Dec 2015 21:19:40 +0100 Subject: [PATCH 002/167] add rest API to reset settings --- .../ClusterUpdateSettingsRequest.java | 20 +-- .../ClusterUpdateSettingsRequestBuilder.java | 20 --- .../cluster/settings/SettingsUpdater.java | 130 ++++++++++++++++++ .../TransportClusterUpdateSettingsAction.java | 104 ++------------ .../common/settings/Settings.java | 8 +- .../loader/XContentSettingsLoader.java | 18 +-- .../settings/SettingsUpdaterTests.java | 127 +++++++++++++++++ .../cluster/settings/ClusterSettingsIT.java | 35 ++++- .../common/settings/SettingsServiceTests.java | 1 - .../cluster/update-settings.asciidoc | 25 ++++ .../test/cluster.put_settings/11_reset.yaml | 31 +++++ 11 files changed, 371 insertions(+), 148 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java create mode 100644 core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/11_reset.yaml diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 96e90e1afc3..1f6bf871101 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -44,8 +44,6 @@ public class 
ClusterUpdateSettingsRequest extends AcknowledgedRequest transientReset = new HashSet<>(); - private Set persistentReset = new HashSet<>(); public ClusterUpdateSettingsRequest() { } @@ -53,7 +51,7 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest getTransientReset() { return Collections.unmodifiableSet(transientReset); } - - public Set getPersistentReset() { return Collections.unmodifiableSet(persistentReset); } - - public void addTransientResetKeys(Collection keys) { - transientReset.addAll(keys); - } - - public void addPersistentResetKeys(Collection keys) { - persistentReset.addAll(keys); - } - /** * Sets the transient settings to be updated. They will not survive a full cluster restart */ @@ -162,8 +148,6 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest(Arrays.asList(in.readStringArray())); - persistentReset = new HashSet<>(Arrays.asList(in.readStringArray())); readTimeout(in); } @@ -172,8 +156,6 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest keys) { - request.addTransientResetKeys(keys); - return this; - } - - public ClusterUpdateSettingsRequestBuilder addPersistentResetKeys(Collection keys) { - request.addPersistentResetKeys(keys); - return this; - } - - public ClusterUpdateSettingsRequestBuilder addTransientResetKeys(String... keys) { - request.addTransientResetKeys(Arrays.asList(keys)); - return this; - } - - public ClusterUpdateSettingsRequestBuilder addPersistentResetKeys(String... 
keys) { - request.addPersistentResetKeys(Arrays.asList(keys)); - return this; - } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java new file mode 100644 index 00000000000..a6b61844a1e --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.settings; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.ClusterSettingsService; +import org.elasticsearch.common.settings.Settings; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.cluster.ClusterState.builder; + +/** + * Updates transient and persistent cluster state settings if there are any changes + * due to the update. 
+ */ +final class SettingsUpdater { + final Settings.Builder transientUpdates = Settings.settingsBuilder(); + final Settings.Builder persistentUpdates = Settings.settingsBuilder(); + private final ClusterSettings dynamicSettings; + private final ClusterSettingsService clusterSettingsService; + + SettingsUpdater(ClusterSettingsService clusterSettingsService) { + this.dynamicSettings = clusterSettingsService.getClusterSettings(); + this.clusterSettingsService = clusterSettingsService; + } + + synchronized Settings getTransientUpdates() { + return transientUpdates.build(); + } + + synchronized Settings getPersistentUpdate() { + return persistentUpdates.build(); + } + + synchronized ClusterState updateSettings(final ClusterState currentState, Settings transientToApply, Settings persistentToApply) { + boolean changed = false; + Settings.Builder transientSettings = Settings.settingsBuilder(); + transientSettings.put(currentState.metaData().transientSettings()); + changed |= apply(transientToApply, transientSettings, transientUpdates, "transient"); + + Settings.Builder persistentSettings = Settings.settingsBuilder(); + persistentSettings.put(currentState.metaData().persistentSettings()); + changed |= apply(persistentToApply, persistentSettings, persistentUpdates, "persistent"); + + if (!changed) { + return currentState; + } + + MetaData.Builder metaData = MetaData.builder(currentState.metaData()) + .persistentSettings(persistentSettings.build()) + .transientSettings(transientSettings.build()); + + ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); + boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings()) || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings()); + if (updatedReadOnly) { + blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); + } else { + blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); + } + ClusterState build = 
builder(currentState).metaData(metaData).blocks(blocks).build(); + Settings settings = build.metaData().settings(); + // now we try to apply things and if they are invalid we fail + // this dryRun will validate & parse settings but won't actually apply them. + clusterSettingsService.dryRun(settings); + return build; + } + + private boolean apply(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) { + boolean changed = false; + final Set toRemove = new HashSet<>(); + Settings.Builder settingsBuilder = Settings.settingsBuilder(); + for (Map.Entry entry : toApply.getAsMap().entrySet()) { + if (entry.getValue() == null) { + toRemove.add(entry.getKey()); + } else if (dynamicSettings.isLoggerSetting(entry.getKey()) || dynamicSettings.hasDynamicSetting(entry.getKey())) { + settingsBuilder.put(entry.getKey(), entry.getValue()); + updates.put(entry.getKey(), entry.getValue()); + changed = true; + } else { + throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable"); + } + + } + changed |= applyDeletes(toRemove, target); + target.put(settingsBuilder.build()); + return changed; + } + + private final boolean applyDeletes(Set deletes, Settings.Builder builder) { + boolean changed = false; + for (String entry : deletes) { + Set keysToRemove = new HashSet<>(); + Set keySet = builder.internalMap().keySet(); + for (String key : keySet) { + if (Regex.simpleMatch(entry, key)) { + keysToRemove.add(key); + } + } + for (String key : keysToRemove) { + builder.remove(key); + changed = true; + } + } + return changed; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 90cc68abe91..9170b9168b1 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -28,26 +28,19 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - import static org.elasticsearch.cluster.ClusterState.builder; /** @@ -57,16 +50,13 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct private final AllocationService allocationService; - private final ClusterSettings dynamicSettings; private final ClusterSettingsService clusterSettingsService; @Inject public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - AllocationService allocationService, ClusterSettings dynamicSettings, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettingsService clusterSettingsService) { + AllocationService allocationService, ActionFilters 
actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettingsService clusterSettingsService) { super(settings, ClusterUpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterUpdateSettingsRequest::new); this.allocationService = allocationService; - this.dynamicSettings = dynamicSettings; this.clusterSettingsService = clusterSettingsService; } @@ -93,9 +83,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener listener) { - final Settings.Builder transientUpdates = Settings.settingsBuilder(); - final Settings.Builder persistentUpdates = Settings.settingsBuilder(); - + final SettingsUpdater updater = new SettingsUpdater(clusterSettingsService); clusterService.submitStateUpdateTask("cluster_update_settings", new AckedClusterStateUpdateTask(Priority.IMMEDIATE, request, listener) { @@ -103,7 +91,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { - return new ClusterUpdateSettingsResponse(acknowledged, transientUpdates.build(), persistentUpdates.build()); + return new ClusterUpdateSettingsResponse(acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate()); } @Override @@ -130,7 +118,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct // so we should *not* execute the reroute. 
if (!clusterService.state().nodes().localNodeMaster()) { logger.debug("Skipping reroute after cluster update settings, because node is no longer master"); - listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build())); + listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate())); return; } @@ -150,13 +138,13 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override //we return when the cluster reroute is acked or it times out but the acknowledged flag depends on whether the update settings was acknowledged protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { - return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, transientUpdates.build(), persistentUpdates.build()); + return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate()); } @Override public void onNoLongerMaster(String source) { logger.debug("failed to preform reroute after cluster settings were updated - current node is no longer a master"); - listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build())); + listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate())); } @Override @@ -186,83 +174,11 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override public ClusterState execute(final ClusterState currentState) { - Settings.Builder transientSettings = Settings.settingsBuilder(); - transientSettings.put(currentState.metaData().transientSettings()); - for (Map.Entry entry : request.transientSettings().getAsMap().entrySet()) { - if (dynamicSettings.isLoggerSetting(entry.getKey()) || 
dynamicSettings.hasDynamicSetting(entry.getKey())) { - transientSettings.put(entry.getKey(), entry.getValue()); - transientUpdates.put(entry.getKey(), entry.getValue()); - changed = true; - } else { - throw new IllegalArgumentException("transient setting [" + entry.getKey() + "], not dynamically updateable"); - } - } - - Settings.Builder persistentSettings = Settings.settingsBuilder(); - persistentSettings.put(currentState.metaData().persistentSettings()); - for (Map.Entry entry : request.persistentSettings().getAsMap().entrySet()) { - if (dynamicSettings.isLoggerSetting(entry.getKey()) || dynamicSettings.hasDynamicSetting(entry.getKey())) { - persistentSettings.put(entry.getKey(), entry.getValue()); - persistentUpdates.put(entry.getKey(), entry.getValue()); - changed = true; - } else { - throw new IllegalArgumentException("persistent setting [" + entry.getKey() + "], not dynamically updateable"); - } - } - - for (String entry : request.getPersistentReset()) { - Set strings = persistentSettings.internalMap().keySet(); - Set keysToRemove = new HashSet(); - for (String key : strings) { - if (Regex.simpleMatch(entry, key)) { - keysToRemove.add(key); - } - } - for (String keyToRemove : keysToRemove) { - persistentSettings.remove(keyToRemove); - persistentUpdates.remove(keyToRemove); - } - changed |= keysToRemove.isEmpty() == false; - } - - for (String entry : request.getTransientReset()) { - Set strings = transientSettings.internalMap().keySet(); - Set keysToRemove = new HashSet<>(); - for (String key : strings) { - if (Regex.simpleMatch(entry, key)) { - keysToRemove.add(key); - } - } - for (String keyToRemove : keysToRemove) { - transientSettings.remove(keyToRemove); - transientUpdates.remove(keyToRemove); - } - changed |= keysToRemove.isEmpty() == false; - } - - - if (!changed) { - return currentState; - } - - MetaData.Builder metaData = MetaData.builder(currentState.metaData()) - .persistentSettings(persistentSettings.build()) - 
.transientSettings(transientSettings.build()); - - ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings()) || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings()); - if (updatedReadOnly) { - blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); - } else { - blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); - } - ClusterState build = builder(currentState).metaData(metaData).blocks(blocks).build(); - Settings settings = build.metaData().settings(); - // now we try to apply things and if they are invalid we fail - // this dryRun will validate & parse settings but won't actually apply them. - clusterSettingsService.dryRun(settings); - return build; + ClusterState clusterState = updater.updateSettings(currentState, request.transientSettings(), request.persistentSettings()); + changed = clusterState != currentState; + return clusterState; } }); } + } diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index aae4cb2b54d..05f3cb1ff0b 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -715,7 +715,7 @@ public final class Settings implements ToXContent { Builder builder = new Builder(); int numberOfSettings = in.readVInt(); for (int i = 0; i < numberOfSettings; i++) { - builder.put(in.readString(), in.readString()); + builder.put(in.readString(), in.readOptionalString()); } return builder.build(); } @@ -724,7 +724,7 @@ public final class Settings implements ToXContent { out.writeVInt(settings.getAsMap().size()); for (Map.Entry entry : settings.getAsMap().entrySet()) { out.writeString(entry.getKey()); - out.writeString(entry.getValue()); + out.writeOptionalString(entry.getValue()); } } @@ -827,6 +827,10 @@ public final class 
Settings implements ToXContent { return this; } + public Builder putNull(String key) { + return put(key, (String) null); + } + /** * Sets a setting with the provided setting key and class as value. * diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java index 725c7e56949..9c2f973b96e 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java @@ -103,9 +103,9 @@ public abstract class XContentSettingsLoader implements SettingsLoader { } else if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NULL) { - // ignore this + serializeValue(settings, sb, path, parser, currentFieldName, true); } else { - serializeValue(settings, sb, path, parser, currentFieldName); + serializeValue(settings, sb, path, parser, currentFieldName, false); } } @@ -126,31 +126,33 @@ public abstract class XContentSettingsLoader implements SettingsLoader { } else if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NULL) { + serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++), true); // ignore } else { - serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++)); + serializeValue(settings, sb, path, parser, fieldName + '.' 
+ (counter++), false); } } } - private void serializeValue(Map settings, StringBuilder sb, List path, XContentParser parser, String fieldName) throws IOException { + private void serializeValue(Map settings, StringBuilder sb, List path, XContentParser parser, String fieldName, boolean isNull) throws IOException { sb.setLength(0); for (String pathEle : path) { sb.append(pathEle).append('.'); } sb.append(fieldName); String key = sb.toString(); - String currentValue = parser.text(); - String previousValue = settings.put(key, currentValue); - if (previousValue != null) { + String currentValue = isNull ? null : parser.text(); + + if (settings.containsKey(key)) { throw new ElasticsearchParseException( "duplicate settings key [{}] found at line number [{}], column number [{}], previous value [{}], current value [{}]", key, parser.getTokenLocation().lineNumber, parser.getTokenLocation().columnNumber, - previousValue, + settings.get(key), currentValue ); } + settings.put(key, currentValue); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java new file mode 100644 index 00000000000..0de13686509 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.admin.cluster.settings; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.ClusterSettingsService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.atomic.AtomicReference; + +public class SettingsUpdaterTests extends ESTestCase { + + + public void testUpdateSetting() { + AtomicReference index = new AtomicReference<>(); + AtomicReference shard = new AtomicReference<>(); + ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); + ClusterSettingsService settingsService = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, shard::set); + SettingsUpdater updater = new SettingsUpdater(settingsService); + MetaData.Builder metaData = MetaData.builder() + .persistentSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.5) + 
.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 2.5).build()) + .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); + ClusterState build = builder.metaData(metaData).build(); + ClusterState clusterState = updater.updateSettings(build, Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.5).build(), + Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.4).build()); + assertNotSame(clusterState, build); + assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 0.4, 0.1); + assertEquals(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 2.5, 0.1); + assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().transientSettings()), 0.5, 0.1); + assertEquals(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.get(clusterState.metaData().transientSettings()), 4.5, 0.1); + + clusterState = updater.updateSettings(clusterState, Settings.builder().putNull("cluster.routing.*").build(), + Settings.EMPTY); + assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 0.4, 0.1); + assertEquals(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 2.5, 0.1); + assertFalse(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + + clusterState = updater.updateSettings(clusterState, + Settings.EMPTY, Settings.builder().putNull("cluster.routing.*").put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 10.0).build()); + + 
assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 10.0, 0.1); + assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().persistentSettings())); + assertFalse(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + assertNull("updater only does a dryRun", index.get()); + assertNull("updater only does a dryRun", shard.get()); + } + + public void testAllOrNothing() { + ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); + ClusterSettingsService settingsService = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + AtomicReference index = new AtomicReference<>(); + AtomicReference shard = new AtomicReference<>(); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, shard::set); + SettingsUpdater updater = new SettingsUpdater(settingsService); + MetaData.Builder metaData = MetaData.builder() + .persistentSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 2.5).build()) + .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); + ClusterState build = builder.metaData(metaData).build(); + + try { + updater.updateSettings(build, Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").build(), + 
Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build()); + fail("all or nothing"); + } catch (IllegalArgumentException ex) { + assertEquals("Failed to parse value [not a float] for setting [cluster.routing.allocation.balance.index]", ex.getMessage()); + } + assertNull("updater only does a dryRun", index.get()); + assertNull("updater only does a dryRun", shard.get()); + } + + public void testClusterBlock() { + ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); + ClusterSettingsService settingsService = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + AtomicReference index = new AtomicReference<>(); + AtomicReference shard = new AtomicReference<>(); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, shard::set); + SettingsUpdater updater = new SettingsUpdater(settingsService); + MetaData.Builder metaData = MetaData.builder() + .persistentSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 2.5).build()) + .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); + ClusterState build = builder.metaData(metaData).build(); + + ClusterState clusterState = updater.updateSettings(build, Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), true).build(), + Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6).put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build()); + 
assertEquals(clusterState.blocks().global().size(), 1); + assertEquals(clusterState.blocks().global().iterator().next(), MetaData.CLUSTER_READ_ONLY_BLOCK); + + clusterState = updater.updateSettings(build, Settings.EMPTY, + Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), false).build()); + assertEquals(clusterState.blocks().global().size(), 0); + + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index ce76893a831..3a028fe54b0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import static org.elasticsearch.common.inject.matcher.Matchers.not; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -59,6 +60,32 @@ public class ClusterSettingsIT extends ESIntegTestCase { } } + public void testDeleteIsAppliedFirst() { + DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class); + + assertEquals(discoverySettings.getPublishTimeout(), DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY)); + assertTrue(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY)); + + ClusterUpdateSettingsResponse response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder() + .put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false) + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) + .get(); + + assertAcked(response); + 
assertEquals(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), "1s"); + assertTrue(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY)); + assertFalse(response.getTransientSettings().getAsBoolean(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), null)); + + response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull((randomBoolean() ? "discovery.zen.*" : "*")).put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "2s")) + .get(); + assertEquals(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), "2s"); + assertNull(response.getTransientSettings().getAsBoolean(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), null)); + } + public void testResetClusterSetting() { DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class); @@ -78,7 +105,7 @@ public class ClusterSettingsIT extends ESIntegTestCase { response = client().admin().cluster() .prepareUpdateSettings() - .addTransientResetKeys(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()) + .setTransientSettings(Settings.builder().putNull(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())) .get(); assertAcked(response); @@ -99,7 +126,7 @@ public class ClusterSettingsIT extends ESIntegTestCase { assertFalse(discoverySettings.getPublishDiff()); response = client().admin().cluster() .prepareUpdateSettings() - .addTransientResetKeys(randomBoolean() ? "discovery.zen.*" : "*") + .setTransientSettings(Settings.builder().putNull((randomBoolean() ? 
"discovery.zen.*" : "*"))) .get(); assertNull(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); @@ -121,7 +148,7 @@ public class ClusterSettingsIT extends ESIntegTestCase { response = client().admin().cluster() .prepareUpdateSettings() - .addPersistentResetKeys(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()) + .setPersistentSettings(Settings.builder().putNull((DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()))) .get(); assertAcked(response); @@ -143,7 +170,7 @@ public class ClusterSettingsIT extends ESIntegTestCase { assertFalse(discoverySettings.getPublishDiff()); response = client().admin().cluster() .prepareUpdateSettings() - .addPersistentResetKeys(randomBoolean() ? "discovery.zen.*" : "*") + .setPersistentSettings(Settings.builder().putNull((randomBoolean() ? "discovery.zen.*" : "*"))) .get(); assertNull(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsServiceTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsServiceTests.java index 254c310daba..ae6fd79aa58 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsServiceTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.settings; import org.elasticsearch.test.ESTestCase; - import java.util.concurrent.atomic.AtomicInteger; public class SettingsServiceTests extends ESTestCase { diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 08f4c900597..2106bdf53b0 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -38,6 +38,31 @@ last example will be: }' -------------------------------------------------- +Resetting persistent or transient settings can be done by assigning a 
+`null` value. If a transient setting is reset, the persistent setting +is applied if available. Otherwise Elasticsearch will fall back to the setting +defined in the configuration file or, if not existent, to the default +value. Here is an example: + +[source,js] +-------------------------------------------------- +curl -XPUT localhost:9200/_cluster/settings -d '{ + "transient" : { + "discovery.zen.minimum_master_nodes" : null + } +}' +-------------------------------------------------- + +Reset settings will not be included in the cluster response. So +the response for the last example will be: + +[source,js] +-------------------------------------------------- +{ + "persistent" : {}, + "transient" : {} +} + Cluster wide settings can be returned using: [source,js] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/11_reset.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/11_reset.yaml new file mode 100644 index 00000000000..069f7921873 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/11_reset.yaml @@ -0,0 +1,31 @@ +--- +"Test reset cluster settings": + - do: + cluster.put_settings: + body: + persistent: + cluster.routing.allocation.disk.threshold_enabled: false + flat_settings: true + + - match: {persistent: {cluster.routing.allocation.disk.threshold_enabled: "false"}} + + - do: + cluster.get_settings: + flat_settings: true + + - match: {persistent: {cluster.routing.allocation.disk.threshold_enabled: "false"}} + + - do: + cluster.put_settings: + body: + persistent: + cluster.routing.allocation.disk.threshold_enabled: null + flat_settings: true + + - match: {persistent: {}} + + - do: + cluster.get_settings: + flat_settings: true + + - match: {persistent: {}} \ No newline at end of file From 85029263270829d700e7ac73bb78d8521a1c8d0a Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 7 Dec 2015 21:35:49 +0100 Subject: [PATCH 003/167] Add missing trailing newline to 11_reset.yaml ---
.../rest-api-spec/test/cluster.put_settings/11_reset.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/11_reset.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/11_reset.yaml index 069f7921873..4162296532d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/11_reset.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/11_reset.yaml @@ -28,4 +28,4 @@ cluster.get_settings: flat_settings: true - - match: {persistent: {}} \ No newline at end of file + - match: {persistent: {}} From 44df6814d490190f34ed3d15d46b932fb8d51a25 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 8 Dec 2015 11:01:41 +0100 Subject: [PATCH 004/167] fix imports --- .../admin/cluster/settings/ClusterUpdateSettingsRequest.java | 2 +- .../cluster/settings/ClusterUpdateSettingsRequestBuilder.java | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 1f6bf871101..0090d7db057 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; -import java.util.*; +import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java 
b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 39ab0adc72f..f0492edfeb1 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -23,8 +23,6 @@ import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; -import java.util.Arrays; -import java.util.Collection; import java.util.Map; /** From dfb8bf658bd9dc4722d667e5bb710c915cda43c7 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 8 Dec 2015 15:22:19 +0100 Subject: [PATCH 005/167] expose default settings on `/_cluster/settings?defaults=true` --- .../common/settings/ClusterSettings.java | 15 +++++ .../RestClusterGetSettingsAction.java | 46 ++++++++++----- .../common/settings/ClusterSettingsTests.java | 57 +++++++++++++++++++ 3 files changed, 104 insertions(+), 14 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 220f2c5a164..5fd0b60dbcd 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -109,6 +109,20 @@ public final class ClusterSettings { return defaults; } + /** + * Returns a settings object that contains all clustersettings that are not + * already set in the given source. 
+ */ + public Settings diff(Settings source) { + Settings.Builder builder = Settings.builder(); + for (Setting setting : keySettings.values()) { + if (setting.exists(source) == false) { + builder.put(setting.getKey(), setting.getRaw(source)); + } + } + return builder.build(); + } + public static Set> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, @@ -167,4 +181,5 @@ public final class ClusterSettings { TransportService.TRACE_LOG_INCLUDE_SETTING, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING))); + } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java index a1cfdb48ddb..fe87edab4f4 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java @@ -23,19 +23,27 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.*; import 
org.elasticsearch.rest.action.support.RestBuilderListener; +import java.io.IOException; + /** */ public class RestClusterGetSettingsAction extends BaseRestHandler { + private final ClusterSettings clusterSettings; + @Inject - public RestClusterGetSettingsAction(Settings settings, RestController controller, Client client) { + public RestClusterGetSettingsAction(Settings settings, RestController controller, Client client, ClusterSettings clusterSettings) { super(settings, controller, client); + this.clusterSettings = clusterSettings; controller.registerHandler(RestRequest.Method.GET, "/_cluster/settings", this); } @@ -44,24 +52,34 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest() .routingTable(false) .nodes(false); + final boolean renderDefaults = request.paramAsBoolean("defaults", false); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); client.admin().cluster().state(clusterStateRequest, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(ClusterStateResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - - builder.startObject("persistent"); - response.getState().metaData().persistentSettings().toXContent(builder, request); - builder.endObject(); - - builder.startObject("transient"); - response.getState().metaData().transientSettings().toXContent(builder, request); - builder.endObject(); - - builder.endObject(); - - return new BytesRestResponse(RestStatus.OK, builder); + return new BytesRestResponse(RestStatus.OK, renderResponse(clusterSettings, response.getState(), renderDefaults, builder, request)); } }); } + + private static XContentBuilder renderResponse(ClusterSettings settings, ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + + builder.startObject("persistent"); + 
state.metaData().persistentSettings().toXContent(builder, params); + builder.endObject(); + + builder.startObject("transient"); + state.metaData().transientSettings().toXContent(builder, params); + builder.endObject(); + + if (renderDefaults) { + builder.startObject("defaults"); + settings.diff(state.metaData().settings()).toXContent(builder, params); + builder.endObject(); + } + + builder.endObject(); + return builder; + } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java new file mode 100644 index 00000000000..a504e0ad727 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common.settings; + +import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; + +/** + */ +public class ClusterSettingsTests extends ESTestCase { + + public void testGet() { + ClusterSettings settings = new ClusterSettings(); + Setting setting = settings.get("cluster.routing.allocation.require.value"); + assertEquals(setting, FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING); + + setting = settings.get("cluster.routing.allocation.total_shards_per_node"); + assertEquals(setting, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING); + } + + public void testIsDynamic(){ + ClusterSettings settings = new ClusterSettings(new HashSet<>(Arrays.asList(Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster), Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.Cluster)))); + assertFalse(settings.hasDynamicSetting("foo.bar.baz")); + assertTrue(settings.hasDynamicSetting("foo.bar")); + assertNotNull(settings.get("foo.bar.baz")); + } + + public void testDiff() throws IOException { + Setting foobarbaz = Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.Cluster); + Setting foobar = Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster); + ClusterSettings settings = new ClusterSettings(new HashSet<>(Arrays.asList(foobar, foobarbaz))); + Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build()); + assertEquals(diff.getAsMap().size(), 1); + assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(1)); + } +} From 5d7f4ef3943dcbd14e85021075f59c9b7cc7b262 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 8 Dec 2015 15:27:11 +0100 Subject: [PATCH 006/167] include default settings --- .../common/settings/ClusterSettings.java
18 ++++-------------- .../settings/RestClusterGetSettingsAction.java | 6 +++--- .../common/settings/ClusterSettingsTests.java | 6 +++++- 3 files changed, 12 insertions(+), 18 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 5fd0b60dbcd..5392fe47529 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -48,10 +48,8 @@ public final class ClusterSettings { private final Map> groupSettings = new HashMap<>(); private final Map> keySettings = new HashMap<>(); - private final Settings defaults; public ClusterSettings(Set> settingsSet) { - Settings.Builder builder = Settings.builder(); for (Setting entry : settingsSet) { if (entry.getScope() != Setting.Scope.Cluster) { throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope()); @@ -61,9 +59,7 @@ public final class ClusterSettings { } else { keySettings.put(entry.getKey(), entry); } - builder.put(entry.getKey(), entry.getDefault(Settings.EMPTY)); } - this.defaults = builder.build(); } public ClusterSettings() { @@ -102,22 +98,16 @@ public final class ClusterSettings { return key.startsWith("logger."); } - /** - * Returns the cluster settings defaults - */ - public Settings getDefaults() { - return defaults; - } - /** * Returns a settings object that contains all clustersettings that are not - * already set in the given source. + * already set in the given source. The diff contains either the default value for each + * setting or the settings value in the given default settings. 
*/ - public Settings diff(Settings source) { + public Settings diff(Settings source, Settings defaultSettings) { Settings.Builder builder = Settings.builder(); for (Setting setting : keySettings.values()) { if (setting.exists(source) == false) { - builder.put(setting.getKey(), setting.getRaw(source)); + builder.put(setting.getKey(), setting.getRaw(defaultSettings)); } } return builder.build(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java index fe87edab4f4..b7b5064c096 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java @@ -57,12 +57,12 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { client.admin().cluster().state(clusterStateRequest, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(ClusterStateResponse response, XContentBuilder builder) throws Exception { - return new BytesRestResponse(RestStatus.OK, renderResponse(clusterSettings, response.getState(), renderDefaults, builder, request)); + return new BytesRestResponse(RestStatus.OK, renderResponse(response.getState(), renderDefaults, builder, request)); } }); } - private static XContentBuilder renderResponse(ClusterSettings settings, ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params) throws IOException { + private XContentBuilder renderResponse(ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); builder.startObject("persistent"); @@ -75,7 +75,7 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { if (renderDefaults) { builder.startObject("defaults"); - 
settings.diff(state.metaData().settings()).toXContent(builder, params); + clusterSettings.diff(state.metaData().settings(), this.settings).toXContent(builder, params); builder.endObject(); } diff --git a/core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java index a504e0ad727..28a639e8903 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java @@ -50,8 +50,12 @@ public class ClusterSettingsTests extends ESTestCase { Setting foobarbaz = Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.Cluster); Setting foobar = Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster); ClusterSettings settings = new ClusterSettings(new HashSet<>(Arrays.asList(foobar, foobarbaz))); - Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build()); + Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.EMPTY); assertEquals(diff.getAsMap().size(), 1); assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(1)); + + diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.builder().put("foo.bar.baz", 17).build()); + assertEquals(diff.getAsMap().size(), 1); + assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(17)); } } From 6c7e5069d4aa841646e3069466fa93c945accfc8 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 8 Dec 2015 21:50:24 +0100 Subject: [PATCH 007/167] apply review comments from @nik9000 --- .../allocator/BalancedShardsAllocator.java | 30 ++--- .../decider/DiskThresholdDecider.java | 2 +- .../common/settings/ClusterSettings.java | 114 +++++++++--------- .../common/settings/Setting.java | 30 ++++- .../common/settings/SettingsService.java | 18 +-- .../HierarchyCircuitBreakerService.java | 4 +- .../common/unit/TimeValueTests.java | 14 +-- 7 files 
changed, 108 insertions(+), 104 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index e99e5c84ecb..d3916692898 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -74,18 +74,12 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; */ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { + public static final Setting INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.Cluster); + public static final Setting SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.Cluster); + public static final Setting THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.Cluster); - public static final float DEFAULT_INDEX_BALANCE_FACTOR = 0.55f; - public static final float DEFAULT_SHARD_BALANCE_FACTOR = 0.45f; - public static final float DEFAULT_THRESHOLD = 1.0f; - - public static final Setting INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", DEFAULT_INDEX_BALANCE_FACTOR, true, Setting.Scope.Cluster); - public static final Setting SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", DEFAULT_SHARD_BALANCE_FACTOR, true, Setting.Scope.Cluster); - public static final Setting THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", DEFAULT_THRESHOLD, true, Setting.Scope.Cluster); - - private volatile WeightFunction weightFunction = new 
WeightFunction(DEFAULT_INDEX_BALANCE_FACTOR, DEFAULT_SHARD_BALANCE_FACTOR); - - private volatile float threshold = DEFAULT_THRESHOLD; + private volatile WeightFunction weightFunction; + private volatile float threshold; public BalancedShardsAllocator(Settings settings) { this(settings, new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); @@ -94,26 +88,18 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards @Inject public BalancedShardsAllocator(Settings settings, ClusterSettingsService clusterSettingsService) { super(settings); - setIndexBalance(INDEX_BALANCE_FACTOR_SETTING.get(settings)); - setShardBalance(SHARD_BALANCE_FACTOR_SETTING.get(settings)); + weightFunction = new WeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings)); setThreshold(THRESHOLD_SETTING.get(settings)); - clusterSettingsService.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, this::setIndexBalance); - clusterSettingsService.addSettingsUpdateConsumer(SHARD_BALANCE_FACTOR_SETTING, this::setShardBalance); + clusterSettingsService.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction); clusterSettingsService.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); } - public void setIndexBalance(float indexBalance) { + public void setWeightFunction(float indexBalance, float shardBalanceFactor) { weightFunction = new WeightFunction(indexBalance, weightFunction.shardBalance); } - public void setShardBalance(float shardBalanceFactor) { - weightFunction = new WeightFunction(weightFunction.indexBalance, shardBalanceFactor); - } public void setThreshold(float threshold) { - if (threshold <= 0.0f) { - throw new IllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold); - } this.threshold = threshold; } diff --git 
a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 40a8442ca3a..abfc3d13018 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -84,7 +84,7 @@ public class DiskThresholdDecider extends AllocationDecider { public static final Setting CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.Cluster); public static final Setting CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "_na_", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.Cluster); - public static final Setting CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "_na_", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "_na_", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.Cluster); public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.Cluster);; public static final Setting CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.Cluster); diff --git 
a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 5392fe47529..7c5da7c5074 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -114,62 +114,62 @@ public final class ClusterSettings { } public static Set> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, - AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, - BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, - BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, - BalancedShardsAllocator.THRESHOLD_SETTING, - ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, - ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, - EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, - EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, - ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING, - FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, - FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, - FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, - IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, - IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, - IndicesTTLService.INDICES_TTL_INTERVAL_SETTING, - MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, - MetaData.SETTING_READ_ONLY_SETTING, - RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING, - RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS_SETTING, - RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE_SETTING, - RecoverySettings.INDICES_RECOVERY_COMPRESS_SETTING, - RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING, - 
RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING, - RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, - RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, - RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, - RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, - RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, - RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, - ThreadPool.THREADPOOL_GROUP_SETTING, - ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, - ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, - InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, - InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, - SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, - DestructiveOperations.REQUIRES_NAME_SETTING, - DiscoverySettings.PUBLISH_TIMEOUT_SETTING, - DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING, - DiscoverySettings.COMMIT_TIMEOUT_SETTING, - DiscoverySettings.NO_MASTER_BLOCK_SETTING, - HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, - HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, - 
InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, - SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, - ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, - TransportService.TRACE_LOG_EXCLUDE_SETTING, - TransportService.TRACE_LOG_INCLUDE_SETTING, - TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, - ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING))); + AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, + BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.THRESHOLD_SETTING, + ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, + ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, + ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, + IndicesTTLService.INDICES_TTL_INTERVAL_SETTING, + MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, + MetaData.SETTING_READ_ONLY_SETTING, + RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING, + RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS_SETTING, + RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE_SETTING, + RecoverySettings.INDICES_RECOVERY_COMPRESS_SETTING, + RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING, + RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING, + RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, + 
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, + RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, + ThreadPool.THREADPOOL_GROUP_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, + SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, + DestructiveOperations.REQUIRES_NAME_SETTING, + DiscoverySettings.PUBLISH_TIMEOUT_SETTING, + DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING, + DiscoverySettings.COMMIT_TIMEOUT_SETTING, + DiscoverySettings.NO_MASTER_BLOCK_SETTING, + HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, + InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, + 
TransportService.TRACE_LOG_EXCLUDE_SETTING, + TransportService.TRACE_LOG_INCLUDE_SETTING, + TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, + ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING))); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 169867ef384..ed635133853 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -241,7 +241,6 @@ public class Setting extends ToXContentToBytes { if (accept.test(inst) == false) { throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + value + "] to [" + getRaw(settings) + "]"); } - logger.info("update [{}] from [{}] to [{}]", key, value, getRaw(settings)); pendingValue = newValue; valueInstance = inst; commitPending = true; @@ -254,6 +253,7 @@ public class Setting extends ToXContentToBytes { public void apply() { if (commitPending) { + logger.info("update [{}] from [{}] to [{}]", key, value, pendingValue); value = pendingValue; consumer.accept(valueInstance); } @@ -283,6 +283,16 @@ public class Setting extends ToXContentToBytes { return new Setting<>(key, "_na_", (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope); } + public static Setting floatSetting(String key, float defaultValue, float minValue, boolean dynamic, Scope scope) { + return new Setting<>(key, "_na_", (s) -> Float.toString(defaultValue), (s) -> { + float value = Float.parseFloat(s); + if (value < minValue) { + throw new ElasticsearchParseException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return value; + }, dynamic, scope); + } + public static Setting intSetting(String key, int defaultValue, boolean dynamic, Scope scope) { return new Setting<>(key, "_na_", (s) -> Integer.toString(defaultValue), Integer::parseInt, dynamic, 
scope); } @@ -304,7 +314,9 @@ public class Setting extends ToXContentToBytes { } public static Setting groupSetting(String key, boolean dynamic, Scope scope) { - String prefix = key.endsWith(".") ? key : key + "."; + if (key.endsWith(".") == false) { + throw new IllegalArgumentException("key must end with a '.'"); + } return new Setting(key, "_na_", "", (s) -> null, dynamic, scope) { @Override @@ -314,12 +326,12 @@ public class Setting extends ToXContentToBytes { @Override public Settings get(Settings settings) { - return settings.getByPrefix(prefix); + return settings.getByPrefix(key); } @Override public boolean match(String toTest) { - return Regex.simpleMatch(prefix + "*", toTest); + return Regex.simpleMatch(key + "*", toTest); } @Override @@ -387,8 +399,14 @@ public class Setting extends ToXContentToBytes { return new Setting<>(key, "_na_", (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, defaultValue, key), dynamic, scope); } - public static Setting nonNegativeDouble(String key, double defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, "_na_", (s) -> Double.toString(defaultValue), Double::parseDouble, dynamic, scope); + public static Setting doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) { + return new Setting<>(key, "_na_", (s) -> Double.toString(defaultValue), (s) -> { + final double d = Double.parseDouble(s); + if (d < minValue) { + throw new ElasticsearchParseException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return d; + }, dynamic, scope); } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsService.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsService.java index b1a1319e800..91ecd844190 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsService.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsService.java @@ -66,7 +66,7 @@ 
public abstract class SettingsService extends AbstractComponent { try { settingUpdater.rollback(); } catch (Exception e) { - logger.warn("failed to rollback settings for [{}]", e, settingUpdater); + logger.error("failed to rollback settings for [{}]", e, settingUpdater); } } } @@ -77,15 +77,15 @@ public abstract class SettingsService extends AbstractComponent { * Applies the given settings to all the settings consumers or to none of them. The settings * will be merged with the node settings before they are applied while given settings override existing node * settings. - * @param settings the settings to apply + * @param newSettings the settings to apply * @return the unmerged applied settings */ - public synchronized Settings applySettings(Settings settings) { - if (lastSettingsApplied != null && settings.equals(lastSettingsApplied)) { + public synchronized Settings applySettings(Settings newSettings) { + if (lastSettingsApplied != null && newSettings.equals(lastSettingsApplied)) { // nothing changed in the settings, ignore - return settings; + return newSettings; } - final Settings build = Settings.builder().put(this.settings).put(settings).build(); + final Settings build = Settings.builder().put(this.settings).put(newSettings).build(); boolean success = false; try { for (SettingUpdater settingUpdater : settingUpdaters) { @@ -109,14 +109,14 @@ public abstract class SettingsService extends AbstractComponent { try { settingUpdater.rollback(); } catch (Exception e) { - logger.warn("failed to refresh settings for [{}]", e, settingUpdater); + logger.error("failed to refresh settings for [{}]", e, settingUpdater); } } } } try { - for (Map.Entry entry : settings.getAsMap().entrySet()) { + for (Map.Entry entry : newSettings.getAsMap().entrySet()) { if (entry.getKey().startsWith("logger.")) { String component = entry.getKey().substring("logger.".length()); if ("_root".equals(component)) { @@ -130,7 +130,7 @@ public abstract class SettingsService extends AbstractComponent 
{ logger.warn("failed to refresh settings for [{}]", e, "logger"); } - return lastSettingsApplied = settings; + return lastSettingsApplied = newSettings; } /** diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index e3837fb391c..8e4886f4ac9 100644 --- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -49,11 +49,11 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.total.limit", "70%", true, Setting.Scope.Cluster); public static final Setting FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", true, Setting.Scope.Cluster); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.nonNegativeDouble("indices.breaker.fielddata.overhead", 1.03d, true, Setting.Scope.Cluster); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, true, Setting.Scope.Cluster); public static final String FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.fielddata.type"; public static final Setting REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.request.limit", "40%", true, Setting.Scope.Cluster); - public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.nonNegativeDouble("indices.breaker.request.overhead", 1.0d, true, Setting.Scope.Cluster); + public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, true, Setting.Scope.Cluster); public static final String 
REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.request.type"; public static final String DEFAULT_BREAKER_TYPE = "memory"; diff --git a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java index 4220973ae21..2945d86fe59 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java @@ -164,12 +164,12 @@ public class TimeValueTests extends ESTestCase { } public void testToStringRep() { - assertThat("-1", equalTo(new TimeValue(-1).getStringRep())); - assertThat("10ms", equalTo(new TimeValue(10, TimeUnit.MILLISECONDS).getStringRep())); - assertThat("1533ms", equalTo(new TimeValue(1533, TimeUnit.MILLISECONDS).getStringRep())); - assertThat("90s", equalTo(new TimeValue(90, TimeUnit.SECONDS).getStringRep())); - assertThat("90m", equalTo(new TimeValue(90, TimeUnit.MINUTES).getStringRep())); - assertThat("36h", equalTo(new TimeValue(36, TimeUnit.HOURS).getStringRep())); - assertThat("1000d", equalTo(new TimeValue(1000, TimeUnit.DAYS).getStringRep())); + assertEquals("-1", new TimeValue(-1).getStringRep()); + assertEquals("10ms", new TimeValue(10, TimeUnit.MILLISECONDS).getStringRep()); + assertEquals("1533ms", new TimeValue(1533, TimeUnit.MILLISECONDS).getStringRep()); + assertEquals("90s", new TimeValue(90, TimeUnit.SECONDS).getStringRep()); + assertEquals("90m", new TimeValue(90, TimeUnit.MINUTES).getStringRep()); + assertEquals("36h", new TimeValue(36, TimeUnit.HOURS).getStringRep()); + assertEquals("1000d", new TimeValue(1000, TimeUnit.DAYS).getStringRep()); } } From edd98b0e6cdf3bd2231ff6cbe503c19fe3b829c0 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 8 Dec 2015 22:15:08 +0100 Subject: [PATCH 008/167] remove description for now --- .../decider/AwarenessAllocationDecider.java | 2 +- .../ClusterRebalanceAllocationDecider.java | 6 +-- .../decider/DiskThresholdDecider.java | 
4 +- .../decider/EnableAllocationDecider.java | 4 +- .../decider/ThrottlingAllocationDecider.java | 2 +- .../common/settings/Setting.java | 38 +++++++------------ .../discovery/DiscoverySettings.java | 4 +- .../index/store/IndexStoreConfig.java | 2 +- .../transport/TransportService.java | 4 +- .../common/settings/SettingTests.java | 4 +- 10 files changed, 30 insertions(+), 40 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 450b953fd38..624367003fa 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -78,7 +78,7 @@ public class AwarenessAllocationDecider extends AllocationDecider { public static final String NAME = "awareness"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "_na_", "", Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster); public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.Cluster); private String[] awarenessAttributes; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index f20489a795e..fe481bcabca 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -37,10 +37,10 @@ import java.util.Locale; *
    *
  • indices_primaries_active - Re-balancing is allowed only once all * primary shards on all indices are active.
  • - * + * *
  • indices_all_active - Re-balancing is allowed only once all * shards on all indices are active.
  • - * + * *
  • always - Re-balancing is allowed once a shard replication group * is active
  • *
@@ -48,7 +48,7 @@ import java.util.Locale; public class ClusterRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "cluster_rebalance"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", "_na_", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.Cluster); /** * An enum representation for the configured re-balance type. diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index abfc3d13018..bdbb9a16902 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -83,8 +83,8 @@ public class DiskThresholdDecider extends AllocationDecider { private volatile TimeValue rerouteInterval; public static final Setting CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.Cluster); - public static final Setting CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "_na_", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.Cluster); - public static final Setting CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new 
Setting<>("cluster.routing.allocation.disk.watermark.high", "_na_", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.Cluster); public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.Cluster);; public static final Setting CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.Cluster); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 38183fab830..b32b953441a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -59,10 +59,10 @@ public class EnableAllocationDecider extends AllocationDecider { public static final String NAME = "enable"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", "_na_", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.Cluster); + public static final Setting 
CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.Cluster); public static final String INDEX_ROUTING_ALLOCATION_ENABLE= "index.routing.allocation.enable"; - public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", "_na_", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.Cluster); public static final String INDEX_ROUTING_REBALANCE_ENABLE = "index.routing.rebalance.enable"; private volatile Rebalance enableRebalance; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 543f0bf780b..7f40667fbf5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -53,7 +53,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider { public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries"; public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, true, Setting.Scope.Cluster); - public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", "_na_", (s) -> 
s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES,Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), Integer::parseInt, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", (s) -> s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES,Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), Integer::parseInt, true, Setting.Scope.Cluster); private volatile int primariesInitialRecoveries; private volatile int concurrentRecoveries; diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index ed635133853..1230936b6d0 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -39,15 +39,13 @@ import java.util.function.Predicate; */ public class Setting extends ToXContentToBytes { private final String key; - private final String description; private final Function defaultValue; private final Function parser; private final boolean dynamic; private final Scope scope; - public Setting(String key, String description, Function defaultValue, Function parser, boolean dynamic, Scope scope) { + public Setting(String key, Function defaultValue, Function parser, boolean dynamic, Scope scope) { this.key = key; - this.description = description; this.defaultValue = defaultValue; this.parser = parser; this.dynamic = dynamic; @@ -62,13 +60,6 @@ public class Setting extends ToXContentToBytes { return key; } - /** - * Returns a human readable description of this setting - */ - public String getDescription() { - return description; - } - /** * Returns true iff this setting is dynamically updateable, otherwise false */ @@ -117,7 +108,7 @@ public class Setting extends ToXContentToBytes { return 
parser.apply(value); } catch (ElasticsearchParseException ex) { throw ex; - } catch (Throwable t) { + } catch (Exception t) { throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", t); } } @@ -143,7 +134,6 @@ public class Setting extends ToXContentToBytes { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("key", key); - builder.field("description", description); builder.field("type", scope.name()); builder.field("dynamic", dynamic); builder.field("default", defaultValue.apply(Settings.EMPTY)); @@ -275,16 +265,16 @@ public class Setting extends ToXContentToBytes { } - public Setting(String key, String description, String defaultValue, Function parser, boolean dynamic, Scope scope) { - this(key, description, (s) -> defaultValue, parser, dynamic, scope); + public Setting(String key, String defaultValue, Function parser, boolean dynamic, Scope scope) { + this(key, (s) -> defaultValue, parser, dynamic, scope); } public static Setting floatSetting(String key, float defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, "_na_", (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope); + return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope); } public static Setting floatSetting(String key, float defaultValue, float minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, "_na_", (s) -> Float.toString(defaultValue), (s) -> { + return new Setting<>(key, (s) -> Float.toString(defaultValue), (s) -> { float value = Float.parseFloat(s); if (value < minValue) { throw new ElasticsearchParseException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); @@ -294,19 +284,19 @@ public class Setting extends ToXContentToBytes { } public static Setting intSetting(String key, int defaultValue, boolean dynamic, Scope scope) { - return 
new Setting<>(key, "_na_", (s) -> Integer.toString(defaultValue), Integer::parseInt, dynamic, scope); + return new Setting<>(key, (s) -> Integer.toString(defaultValue), Integer::parseInt, dynamic, scope); } public static Setting boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, "_na_", (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope); + return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope); } public static Setting byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) { - return new Setting<>(key, "_na_", (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope); + return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope); } public static Setting byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) { - return new Setting<>(key, "_na_", (s) -> value.toString(), (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope); + return new Setting<>(key, (s) -> value.toString(), (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope); } public static Setting positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { @@ -317,7 +307,7 @@ public class Setting extends ToXContentToBytes { if (key.endsWith(".") == false) { throw new IllegalArgumentException("key must end with a '.'"); } - return new Setting(key, "_na_", "", (s) -> null, dynamic, scope) { + return new Setting(key, "", (s) -> null, dynamic, scope) { @Override public boolean isGroupSetting() { @@ -382,7 +372,7 @@ public class Setting extends ToXContentToBytes { } public static Setting timeSetting(String key, Function defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, "_na_", defaultValue, (s) -> { + return new Setting<>(key, 
defaultValue, (s) -> { TimeValue timeValue = TimeValue.parseTimeValue(s, null, key); if (timeValue.millis() < minValue.millis()) { throw new ElasticsearchParseException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); @@ -396,11 +386,11 @@ public class Setting extends ToXContentToBytes { } public static Setting timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, "_na_", (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, defaultValue, key), dynamic, scope); + return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, defaultValue, key), dynamic, scope); } public static Setting doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, "_na_", (s) -> Double.toString(defaultValue), (s) -> { + return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> { final double d = Double.parseDouble(s); if (d < minValue) { throw new ElasticsearchParseException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index 829a173060d..745c4760d54 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -46,8 +46,8 @@ public class DiscoverySettings extends AbstractComponent { * sets the timeout for receiving enough acks for a specific cluster state and committing it. failing * to receive responses within this window will cause the cluster state change to be rejected. 
*/ - public static final Setting COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", "_na_", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.Cluster); - public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "_na_", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.Cluster); + public static final Setting COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.Cluster); + public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.Cluster); public static final Setting PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.Cluster); public final static int NO_MASTER_BLOCK_ID = 2; diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java index 48f24613f6f..0588dafd310 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java @@ -36,7 +36,7 @@ public class IndexStoreConfig{ /** * Configures the node / cluster level throttle type. See {@link StoreRateLimiting.Type}. 
*/ - public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", "_na_", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, true, Setting.Scope.Cluster); + public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, true, Setting.Scope.Cluster); /** * Configures the node / cluster level throttle intensity. The default is 10240 MB */ diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index f9cbb012b2a..d0d5b5e230b 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -87,8 +87,8 @@ public class TransportService extends AbstractLifecycleComponent TRACE_LOG_INCLUDE_SETTING = new Setting<>("transport.tracer.include", "_na_", "", Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster); - public static final Setting TRACE_LOG_EXCLUDE_SETTING = new Setting<>("transport.tracer.exclude", "_na_", "internal:discovery/zen/fd*," + TransportLivenessAction.NAME, Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster);; + public static final Setting TRACE_LOG_INCLUDE_SETTING = new Setting<>("transport.tracer.include", "", Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster); + public static final Setting TRACE_LOG_EXCLUDE_SETTING = new Setting<>("transport.tracer.exclude", "internal:discovery/zen/fd*," + TransportLivenessAction.NAME, Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster);; private final ESLogger tracerLog; diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 08ec33d0b43..5787ae59f56 100644 --- 
a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -113,14 +113,14 @@ public class SettingTests extends ESTestCase { assertEquals(defautlValue.millis() + "ms", aDefault); assertEquals(defautlValue.millis(), setting.get(Settings.EMPTY).millis()); - Setting secondaryDefault = new Setting<>("foo.bar", "_na_", (s) -> s.get("old.foo.bar", "some_default"), (s) -> s, randomBoolean(), Setting.Scope.Cluster); + Setting secondaryDefault = new Setting<>("foo.bar", (s) -> s.get("old.foo.bar", "some_default"), (s) -> s, randomBoolean(), Setting.Scope.Cluster); assertEquals("some_default", secondaryDefault.get(Settings.EMPTY)); assertEquals("42", secondaryDefault.get(Settings.builder().put("old.foo.bar", 42).build())); } public void testComplexType() { AtomicReference ref = new AtomicReference<>(null); - Setting setting = new Setting<>("foo.bar", "", (s) -> "", (s) -> new ComplexType(s), true, Setting.Scope.Cluster); + Setting setting = new Setting<>("foo.bar", (s) -> "", (s) -> new ComplexType(s), true, Setting.Scope.Cluster); assertFalse(setting.isGroupSetting()); ref.set(setting.get(Settings.EMPTY)); ComplexType type = ref.get(); From c9d7c922437afb15aecd9b47230f76440eef1c1b Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Dec 2015 09:57:39 +0100 Subject: [PATCH 009/167] fold ClusterSettingsService into ClusterSettings --- .../cluster/settings/SettingsUpdater.java | 13 ++-- .../TransportClusterUpdateSettingsAction.java | 10 +-- .../close/TransportCloseIndexAction.java | 6 +- .../delete/TransportDeleteIndexAction.java | 4 +- .../open/TransportOpenIndexAction.java | 3 +- .../action/support/DestructiveOperations.java | 6 +- .../elasticsearch/cluster/ClusterModule.java | 5 +- .../cluster/InternalClusterInfoService.java | 10 +-- .../action/index/MappingUpdatedAction.java | 6 +- .../allocator/BalancedShardsAllocator.java | 9 +-- 
.../decider/AwarenessAllocationDecider.java | 9 +-- .../ClusterRebalanceAllocationDecider.java | 6 +- .../ConcurrentRebalanceAllocationDecider.java | 6 +- .../decider/DiskThresholdDecider.java | 15 ++-- .../decider/EnableAllocationDecider.java | 8 +- .../decider/FilterAllocationDecider.java | 10 +-- .../decider/ShardsLimitAllocationDecider.java | 6 +- .../SnapshotInProgressAllocationDecider.java | 7 +- .../decider/ThrottlingAllocationDecider.java | 8 +- .../service/InternalClusterService.java | 12 +-- ...rvice.java => AbstractScopedSettings.java} | 74 ++++++++++++++++--- .../common/settings/ClusterSettings.java | 64 +--------------- .../settings/ClusterSettingsService.java | 53 ------------- .../common/settings/Setting.java | 18 ++--- .../discovery/DiscoverySettings.java | 12 +-- .../discovery/zen/ZenDiscovery.java | 8 +- .../elasticsearch/indices/IndicesService.java | 8 +- .../HierarchyCircuitBreakerService.java | 10 +-- .../indices/recovery/RecoverySettings.java | 28 +++---- .../indices/ttl/IndicesTTLService.java | 6 +- .../java/org/elasticsearch/node/Node.java | 5 +- .../org/elasticsearch/node/NodeModule.java | 1 - .../elasticsearch/search/SearchService.java | 6 +- .../snapshots/RestoreService.java | 9 +-- .../elasticsearch/threadpool/ThreadPool.java | 6 +- .../transport/TransportService.java | 8 +- .../settings/SettingsUpdaterTests.java | 8 +- .../cluster/ClusterModuleTests.java | 5 +- .../allocation/BalanceConfigurationTests.java | 5 +- .../DiskThresholdDeciderUnitTests.java | 7 +- .../decider/EnableAllocationTests.java | 13 ++-- .../breaker/MemoryCircuitBreakerTests.java | 5 +- .../common/settings/ClusterSettingsTests.java | 61 --------------- ...iceTests.java => ScopedSettingsTests.java} | 61 +++++++++------ .../common/settings/SettingTests.java | 16 ++-- .../common/util/BigArraysTests.java | 5 +- .../zen/NodeJoinControllerTests.java | 3 +- .../PublishClusterStateActionTests.java | 3 +- .../breaker/CircuitBreakerUnitTests.java | 3 +- 
.../recovery/RecoverySourceHandlerTests.java | 3 +- .../UpdateThreadPoolSettingsTests.java | 47 ++++++------ .../AbstractSimpleTransportTestCase.java | 3 +- .../NettySizeHeaderFrameDecoderTests.java | 3 +- .../discovery/azure/AzureDiscovery.java | 1 - .../discovery/ec2/Ec2Discovery.java | 1 - .../discovery/gce/GceDiscovery.java | 1 - .../MockInternalClusterInfoService.java | 6 +- .../search/MockSearchService.java | 8 +- .../test/ESAllocationTestCase.java | 17 ++--- 59 files changed, 313 insertions(+), 447 deletions(-) rename core/src/main/java/org/elasticsearch/common/settings/{SettingsService.java => AbstractScopedSettings.java} (75%) delete mode 100644 core/src/main/java/org/elasticsearch/common/settings/ClusterSettingsService.java delete mode 100644 core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java rename core/src/test/java/org/elasticsearch/common/settings/{SettingsServiceTests.java => ScopedSettingsTests.java} (65%) diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java index a6b61844a1e..f5020a46b37 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.common.settings.Settings; import java.util.HashSet; @@ -40,12 +39,10 @@ import static org.elasticsearch.cluster.ClusterState.builder; final class SettingsUpdater { final Settings.Builder transientUpdates = Settings.settingsBuilder(); final Settings.Builder persistentUpdates = 
Settings.settingsBuilder(); - private final ClusterSettings dynamicSettings; - private final ClusterSettingsService clusterSettingsService; + private final ClusterSettings clusterSettings; - SettingsUpdater(ClusterSettingsService clusterSettingsService) { - this.dynamicSettings = clusterSettingsService.getClusterSettings(); - this.clusterSettingsService = clusterSettingsService; + SettingsUpdater(ClusterSettings clusterSettings) { + this.clusterSettings = clusterSettings; } synchronized Settings getTransientUpdates() { @@ -85,7 +82,7 @@ final class SettingsUpdater { Settings settings = build.metaData().settings(); // now we try to apply things and if they are invalid we fail // this dryRun will validate & parse settings but won't actually apply them. - clusterSettingsService.dryRun(settings); + clusterSettings.dryRun(settings); return build; } @@ -96,7 +93,7 @@ final class SettingsUpdater { for (Map.Entry entry : toApply.getAsMap().entrySet()) { if (entry.getValue() == null) { toRemove.add(entry.getKey()); - } else if (dynamicSettings.isLoggerSetting(entry.getKey()) || dynamicSettings.hasDynamicSetting(entry.getKey())) { + } else if (clusterSettings.isLoggerSetting(entry.getKey()) || clusterSettings.hasDynamicSetting(entry.getKey())) { settingsBuilder.put(entry.getKey(), entry.getValue()); updates.put(entry.getKey(), entry.getValue()); changed = true; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 9170b9168b1..99815b77ff7 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -36,8 +36,8 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import 
org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -50,14 +50,14 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct private final AllocationService allocationService; - private final ClusterSettingsService clusterSettingsService; + private final ClusterSettings clusterSettings; @Inject public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - AllocationService allocationService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettingsService clusterSettingsService) { + AllocationService allocationService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettings clusterSettings) { super(settings, ClusterUpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterUpdateSettingsRequest::new); this.allocationService = allocationService; - this.clusterSettingsService = clusterSettingsService; + this.clusterSettings = clusterSettings; } @Override @@ -83,7 +83,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener listener) { - final SettingsUpdater updater = new SettingsUpdater(clusterSettingsService); + final SettingsUpdater updater = new SettingsUpdater(clusterSettings); clusterService.submitStateUpdateTask("cluster_update_settings", new AckedClusterStateUpdateTask(Priority.IMMEDIATE, 
request, listener) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 994fdcced1f..57c9fdbf0da 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -31,9 +31,9 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -50,13 +50,13 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction(clusterDynamicSettings.values()))); - bind(ClusterSettingsService.class).toInstance(clusterSettingsService); + final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(clusterDynamicSettings.values())); + bind(ClusterSettings.class).toInstance(clusterSettings); } diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 13fe2472bd4..377d6578ac1 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -37,12 +37,12 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; 
import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.monitor.fs.FsInfo; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ReceiveTimeoutTransportException; @@ -83,7 +83,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu private final List listeners = new CopyOnWriteArrayList<>(); @Inject - public InternalClusterInfoService(Settings settings, ClusterSettingsService clusterSettingsService, + public InternalClusterInfoService(Settings settings, ClusterSettings clusterSettings, TransportNodesStatsAction transportNodesStatsAction, TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService, ThreadPool threadPool) { @@ -99,9 +99,9 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu this.updateFrequency = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings); this.fetchTimeout = INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.get(settings); this.enabled = DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); - clusterSettingsService.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, this::setFetchTimeout); - clusterSettingsService.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, this::setUpdateFrequency); - clusterSettingsService.addSettingsUpdateConsumer(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); + clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, this::setFetchTimeout); + 
clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, this::setUpdateFrequency); + clusterSettings.addSettingsUpdateConsumer(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); // Add InternalClusterInfoService to listen for Master changes this.clusterService.add((LocalNodeMasterListener)this); diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 04fea06138e..7df0e24210e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -26,12 +26,12 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.concurrent.TimeoutException; @@ -47,10 +47,10 @@ public class MappingUpdatedAction extends AbstractComponent { private volatile TimeValue dynamicMappingUpdateTimeout; @Inject - public MappingUpdatedAction(Settings settings, ClusterSettingsService clusterSettingsService) { + public MappingUpdatedAction(Settings settings, ClusterSettings clusterSettings) { super(settings); this.dynamicMappingUpdateTimeout = INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.get(settings); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, this::setDynamicMappingUpdateTimeout); + 
clusterSettings.addSettingsUpdateConsumer(INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, this::setDynamicMappingUpdateTimeout); } private void setDynamicMappingUpdateTimeout(TimeValue dynamicMappingUpdateTimeout) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index d3916692898..f52b3228c47 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -41,7 +41,6 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.PriorityComparator; -import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.ArrayList; import java.util.Collection; @@ -82,16 +81,16 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards private volatile float threshold; public BalancedShardsAllocator(Settings settings) { - this(settings, new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); + this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @Inject - public BalancedShardsAllocator(Settings settings, ClusterSettingsService clusterSettingsService) { + public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) { super(settings); weightFunction = new WeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings)); setThreshold(THRESHOLD_SETTING.get(settings)); - clusterSettingsService.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction); - 
clusterSettingsService.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); + clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction); + clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); } public void setWeightFunction(float indexBalance, float shardBalanceFactor) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 624367003fa..148fdd82f35 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.HashMap; import java.util.Map; @@ -98,16 +97,16 @@ public class AwarenessAllocationDecider extends AllocationDecider { * @param settings {@link Settings} to use */ public AwarenessAllocationDecider(Settings settings) { - this(settings, new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); + this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @Inject - public AwarenessAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { + public AwarenessAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); - 
clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes); setForcedAwarenessAttributes(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings)); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, this::setForcedAwarenessAttributes); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, this::setForcedAwarenessAttributes); } private void setForcedAwarenessAttributes(Settings forceSettings) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index fe481bcabca..036695bee46 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -22,9 +22,9 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.Locale; @@ -82,7 +82,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { private ClusterRebalanceType type; @Inject - public ClusterRebalanceAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { + public 
ClusterRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); try { type = CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(settings); @@ -92,7 +92,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { } logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, type.toString().toLowerCase(Locale.ROOT)); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType); } public void setType(ClusterRebalanceType type) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index 040c4481951..7843a31ff91 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -22,9 +22,9 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; /** * Similar to the {@link ClusterRebalanceAllocationDecider} this @@ -46,11 +46,11 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { private volatile int clusterConcurrentRebalance; @Inject - public ConcurrentRebalanceAllocationDecider(Settings settings, 
ClusterSettingsService clusterSettingsService) { + public ConcurrentRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); this.clusterConcurrentRebalance = CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings); logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, this::setClusterConcurrentRebalance); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, this::setClusterConcurrentRebalance); } public void setClusterConcurrentRebalance(int concurrentRebalance) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index bdbb9a16902..5ffaa1f0e84 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -43,7 +43,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.RatioValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.Set; @@ -191,11 +190,11 @@ public class DiskThresholdDecider extends AllocationDecider { // It's okay the Client is null here, because the empty cluster info // service will never actually call the listener where the client is // needed. 
Also this constructor is only used for tests - this(settings, new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), EmptyClusterInfoService.INSTANCE, null); + this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), EmptyClusterInfoService.INSTANCE, null); } @Inject - public DiskThresholdDecider(Settings settings, ClusterSettingsService clusterSettingsService, ClusterInfoService infoService, Client client) { + public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings, ClusterInfoService infoService, Client client) { super(settings); final String lowWatermark = CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.get(settings); final String highWatermark = CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.get(settings); @@ -204,11 +203,11 @@ public class DiskThresholdDecider extends AllocationDecider { this.includeRelocations = CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.get(settings); this.rerouteInterval = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(settings); this.enabled = CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, this::setLowWatermark); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, this::setHighWatermark); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, this::setIncludeRelocations); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, this::setRerouteInterval); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, this::setLowWatermark); + 
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, this::setHighWatermark); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, this::setIncludeRelocations); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, this::setRerouteInterval); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); infoService.addListener(new DiskListener(client)); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index b32b953441a..61156de7137 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -23,9 +23,9 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.Locale; @@ -69,12 +69,12 @@ public class EnableAllocationDecider extends AllocationDecider { private volatile Allocation enableAllocation; @Inject - public EnableAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { + public EnableAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); this.enableAllocation = CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(settings); this.enableRebalance = 
CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.get(settings); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, this::setEnableAllocation); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, this::setEnableAllocation); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance); } public void setEnableRebalance(Rebalance enableRebalance) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index f9ee7ad8182..87ec158e8ca 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -25,9 +25,9 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; @@ -73,14 +73,14 @@ public class FilterAllocationDecider extends AllocationDecider { private volatile DiscoveryNodeFilters clusterExcludeFilters; @Inject - public FilterAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { + public FilterAllocationDecider(Settings settings, 
ClusterSettings clusterSettings) { super(settings); setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.get(settings)); setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.get(settings)); setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.get(settings)); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters); } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index ecd03d92c70..3bc02879e61 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -25,9 +25,9 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; /** * This 
{@link AllocationDecider} limits the number of shards per node on a per @@ -68,10 +68,10 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { @Inject - public ShardsLimitAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { + public ShardsLimitAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit); } private void setClusterShardLimit(int clusterShardLimit) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index fa447626e63..fdc65bf7ac8 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; /** * This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that @@ -57,14 +56,14 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { * @param settings {@link org.elasticsearch.common.settings.Settings} to use */ public SnapshotInProgressAllocationDecider(Settings settings) { - this(settings, new ClusterSettingsService(settings, new 
ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); + this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @Inject - public SnapshotInProgressAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { + public SnapshotInProgressAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); enableRelocation = CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING.get(settings); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, this::setEnableRelocation); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, this::setEnableRelocation); } private void setEnableRelocation(boolean enableRelocation) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 7f40667fbf5..f7369f35dd9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -23,9 +23,9 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; /** * {@link ThrottlingAllocationDecider} controls the recovery process per node in @@ -59,13 +59,13 @@ public class ThrottlingAllocationDecider extends AllocationDecider { private volatile int 
concurrentRecoveries; @Inject - public ThrottlingAllocationDecider(Settings settings, ClusterSettingsService clusterSettingsService) { + public ThrottlingAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); this.primariesInitialRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.get(settings); this.concurrentRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.get(settings); logger.debug("using node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, this::setPrimariesInitialRecoveries); - clusterSettingsService.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, this::setConcurrentRecoveries); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, this::setPrimariesInitialRecoveries); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, this::setConcurrentRecoveries); } private void setConcurrentRecoveries(int concurrentRecoveries) { diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index 56bf400d46b..945b9d1ea59 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.ClusterSettings; import 
org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.StringText; @@ -47,7 +48,6 @@ import org.elasticsearch.common.util.concurrent.*; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryService; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -75,7 +75,7 @@ public class InternalClusterService extends AbstractLifecycleComponent settingUpdaters = new ArrayList<>(); + private final Map> groupSettings = new HashMap<>(); + private final Map> keySettings = new HashMap<>(); + private final Setting.Scope scope; - protected SettingsService(Settings settings) { + protected AbstractScopedSettings(Settings settings, Set> settingsSet, Setting.Scope scope) { super(settings); + for (Setting entry : settingsSet) { + if (entry.getScope() != scope) { + throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope()); + } + if (entry.isGroupSetting()) { + groupSettings.put(entry.getKey(), entry); + } else { + keySettings.put(entry.getKey(), entry); + } + } + this.scope = scope; + } + + public Setting.Scope getScope() { + return this.scope; } /** @@ -140,7 +156,7 @@ public abstract class SettingsService extends AbstractComponent { *

*/ public synchronized void addSettingsUpdateConsumer(Setting setting, Consumer consumer, Predicate predicate) { - if (setting != getSetting(setting.getKey())) { + if (setting != get(setting.getKey())) { throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); } this.settingUpdaters.add(setting.newUpdater(consumer, logger, settings, predicate)); @@ -153,10 +169,10 @@ public abstract class SettingsService extends AbstractComponent { *

*/ public synchronized void addSettingsUpdateConsumer(Setting
a, Setting b, BiConsumer consumer) { - if (a != getSetting(a.getKey())) { + if (a != get(a.getKey())) { throw new IllegalArgumentException("Setting is not registered for key [" + a.getKey() + "]"); } - if (b != getSetting(b.getKey())) { + if (b != get(b.getKey())) { throw new IllegalArgumentException("Setting is not registered for key [" + b.getKey() + "]"); } this.settingUpdaters.add(Setting.compoundUpdater(consumer, a, b, logger, settings)); @@ -172,8 +188,6 @@ public abstract class SettingsService extends AbstractComponent { addSettingsUpdateConsumer(setting, consumer, (s) -> true); } - protected abstract Setting getSetting(String key); - /** * Transactional interface to update settings. * @see Setting @@ -198,4 +212,44 @@ public abstract class SettingsService extends AbstractComponent { void rollback(); } + /** + * Returns the {@link Setting} for the given key or null if the setting can not be found. + */ + public Setting get(String key) { + Setting setting = keySettings.get(key); + if (setting == null) { + for (Map.Entry> entry : groupSettings.entrySet()) { + if (entry.getValue().match(key)) { + return entry.getValue(); + } + } + } else { + return setting; + } + return null; + } + + /** + * Returns true if the setting for the given key is dynamically updateable. Otherwise false. + */ + public boolean hasDynamicSetting(String key) { + final Setting setting = get(key); + return setting != null && setting.isDynamic(); + } + + /** + * Returns a settings object that contains all cluster settings that are not + * already set in the given source. The diff contains either the default value for each + * setting or the settings value in the given default settings. 
+ */ + public Settings diff(Settings source, Settings defaultSettings) { + Settings.Builder builder = Settings.builder(); + for (Setting setting : keySettings.values()) { + if (setting.exists(source) == false) { + builder.put(setting.getKey(), setting.getRaw(defaultSettings)); + } + } + return builder.build(); + } + } diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 7c5da7c5074..bf9c24dad8d 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -26,8 +26,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.*; import org.elasticsearch.cluster.service.InternalClusterService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; @@ -44,51 +42,10 @@ import java.util.*; /** * Encapsulates all valid cluster level settings. 
*/ -public final class ClusterSettings { +public final class ClusterSettings extends AbstractScopedSettings { - private final Map> groupSettings = new HashMap<>(); - private final Map> keySettings = new HashMap<>(); - - public ClusterSettings(Set> settingsSet) { - for (Setting entry : settingsSet) { - if (entry.getScope() != Setting.Scope.Cluster) { - throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope()); - } - if (entry.isGroupSetting()) { - groupSettings.put(entry.getKey(), entry); - } else { - keySettings.put(entry.getKey(), entry); - } - } - } - - public ClusterSettings() { - this(BUILT_IN_CLUSTER_SETTINGS); - } - - /** - * Returns the {@link Setting} for the given key or null if the setting can not be found. - */ - public Setting get(String key) { - Setting setting = keySettings.get(key); - if (setting == null) { - for (Map.Entry> entry : groupSettings.entrySet()) { - if (entry.getValue().match(key)) { - return entry.getValue(); - } - } - } else { - return setting; - } - return null; - } - - /** - * Returns true if the setting for the given key is dynamically updateable. Otherwise false. - */ - public boolean hasDynamicSetting(String key) { - final Setting setting = get(key); - return setting != null && setting.isDynamic(); + public ClusterSettings(Settings settings, Set> settingsSet) { + super(settings, settingsSet, Setting.Scope.Cluster); } /** @@ -98,20 +55,6 @@ public final class ClusterSettings { return key.startsWith("logger."); } - /** - * Returns a settings object that contains all clustersettings that are not - * already set in the given source. The diff contains either the default value for each - * setting or the settings value in the given default settings. 
- */ - public Settings diff(Settings source, Settings defaultSettings) { - Settings.Builder builder = Settings.builder(); - for (Setting setting : keySettings.values()) { - if (setting.exists(source) == false) { - builder.put(setting.getKey(), setting.getRaw(defaultSettings)); - } - } - return builder.build(); - } public static Set> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, @@ -171,5 +114,4 @@ public final class ClusterSettings { TransportService.TRACE_LOG_INCLUDE_SETTING, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING))); - } diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettingsService.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettingsService.java deleted file mode 100644 index e66844d4e88..00000000000 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettingsService.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.settings; - -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLoggerFactory; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.function.BiConsumer; -import java.util.function.Consumer; -import java.util.function.Predicate; - -/** - * A service that allows to register for node settings change that can come from cluster - * events holding new settings. - */ -public final class ClusterSettingsService extends SettingsService { - private final ClusterSettings clusterSettings; - - @Inject - public ClusterSettingsService(Settings settings, ClusterSettings clusterSettings) { - super(settings); - this.clusterSettings = clusterSettings; - } - - protected Setting getSetting(String key) { - return this.clusterSettings.get(key); - } - - public ClusterSettings getClusterSettings() { - return clusterSettings; - } -} diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 1230936b6d0..fda10ea226b 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -149,11 +149,11 @@ public class Setting extends ToXContentToBytes { Index; } - SettingsService.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings) { + AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings) { return newUpdater(consumer, logger, settings, (s) -> true); } - SettingsService.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Predicate accept) { + AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Predicate accept) { if (isDynamic()) { return new Updater(consumer, logger, settings, 
accept); } else { @@ -161,12 +161,12 @@ public class Setting extends ToXContentToBytes { } } - static SettingsService.SettingUpdater compoundUpdater(final BiConsumer consumer, final Setting aSettting, final Setting bSetting, ESLogger logger, Settings settings) { + static AbstractScopedSettings.SettingUpdater compoundUpdater(final BiConsumer consumer, final Setting aSettting, final Setting bSetting, ESLogger logger, Settings settings) { final AtomicReference aRef = new AtomicReference<>(); final AtomicReference bRef = new AtomicReference<>(); - final SettingsService.SettingUpdater aSettingUpdater = aSettting.newUpdater(aRef::set, logger, settings); - final SettingsService.SettingUpdater bSettingUpdater = bSetting.newUpdater(bRef::set, logger, settings); - return new SettingsService.SettingUpdater() { + final AbstractScopedSettings.SettingUpdater aSettingUpdater = aSettting.newUpdater(aRef::set, logger, settings); + final AbstractScopedSettings.SettingUpdater bSettingUpdater = bSetting.newUpdater(bRef::set, logger, settings); + return new AbstractScopedSettings.SettingUpdater() { boolean aHasChanged = false; boolean bHasChanged = false; @Override @@ -204,7 +204,7 @@ public class Setting extends ToXContentToBytes { } - private class Updater implements SettingsService.SettingUpdater { + private class Updater implements AbstractScopedSettings.SettingUpdater { private final Consumer consumer; private final ESLogger logger; private final Predicate accept; @@ -325,12 +325,12 @@ public class Setting extends ToXContentToBytes { } @Override - public SettingsService.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Predicate accept) { + public AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Predicate accept) { if (isDynamic() == false) { throw new IllegalStateException("setting [" + getKey() + "] is not dynamic"); } final Setting setting = this; - return new SettingsService.SettingUpdater() 
{ + return new AbstractScopedSettings.SettingUpdater() { private Settings pendingSettings; private Settings committedSettings = get(settings); diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index 745c4760d54..26902a70e58 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -23,10 +23,10 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.rest.RestStatus; import java.util.EnumSet; @@ -62,12 +62,12 @@ public class DiscoverySettings extends AbstractComponent { private volatile boolean publishDiff; @Inject - public DiscoverySettings(Settings settings, ClusterSettingsService clusterSettingsService) { + public DiscoverySettings(Settings settings, ClusterSettings clusterSettings) { super(settings); - clusterSettingsService.addSettingsUpdateConsumer(NO_MASTER_BLOCK_SETTING, this::setNoMasterBlock); - clusterSettingsService.addSettingsUpdateConsumer(PUBLISH_DIFF_ENABLE_SETTING, this::setPublishDiff); - clusterSettingsService.addSettingsUpdateConsumer(COMMIT_TIMEOUT_SETTING, this::setCommitTimeout); - clusterSettingsService.addSettingsUpdateConsumer(PUBLISH_TIMEOUT_SETTING, this::setPublishTimeout); + clusterSettings.addSettingsUpdateConsumer(NO_MASTER_BLOCK_SETTING, this::setNoMasterBlock); + clusterSettings.addSettingsUpdateConsumer(PUBLISH_DIFF_ENABLE_SETTING, this::setPublishDiff); + 
clusterSettings.addSettingsUpdateConsumer(COMMIT_TIMEOUT_SETTING, this::setCommitTimeout); + clusterSettings.addSettingsUpdateConsumer(PUBLISH_TIMEOUT_SETTING, this::setPublishTimeout); this.noMasterBlock = NO_MASTER_BLOCK_SETTING.get(settings); this.publishTimeout = PUBLISH_TIMEOUT_SETTING.get(settings); this.commitTimeout = COMMIT_TIMEOUT_SETTING.get(settings); diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 7333618aef6..2c230d22599 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -56,7 +57,6 @@ import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; @@ -140,7 +140,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen @Inject public ZenDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, - TransportService transportService, final ClusterService clusterService, ClusterSettingsService clusterSettingsService, + TransportService transportService, final ClusterService clusterService, ClusterSettings clusterSettings, 
ZenPingService pingService, ElectMasterService electMasterService, DiscoverySettings discoverySettings) { super(settings); @@ -172,7 +172,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes); - clusterSettingsService.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> { + clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> { final ClusterState clusterState = clusterService.state(); int masterNodes = clusterState.nodes().masterNodes().size(); if (value > masterNodes) { @@ -180,7 +180,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } return true; }); - clusterSettingsService.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoingOnMasterGone); + clusterSettings.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoingOnMasterGone); this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterName, clusterService); this.masterFD.addListener(new MasterNodeFailureListener()); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index ad98e3e9a8f..d8c142f478d 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; +import 
org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -58,7 +59,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.plugins.PluginsService; import java.io.IOException; @@ -100,7 +100,7 @@ public class IndicesService extends AbstractLifecycleComponent i @Inject public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv, - ClusterSettingsService clusterSettingsService, AnalysisRegistry analysisRegistry, + ClusterSettings clusterSettings, AnalysisRegistry analysisRegistry, IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, MapperRegistry mapperRegistry) { super(settings); @@ -113,8 +113,8 @@ public class IndicesService extends AbstractLifecycleComponent i this.clusterService = clusterService; this.indexNameExpressionResolver = indexNameExpressionResolver; this.mapperRegistry = mapperRegistry; - clusterSettingsService.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType); - clusterSettingsService.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle); + clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType); + clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle); } diff --git 
a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 8e4886f4ac9..fc80495bb1b 100644 --- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -25,10 +25,10 @@ import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.settings.ClusterSettingsService; import java.util.ArrayList; import java.util.List; @@ -66,7 +66,7 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { private final AtomicLong parentTripCount = new AtomicLong(0); @Inject - public HierarchyCircuitBreakerService(Settings settings, ClusterSettingsService clusterSettingsService) { + public HierarchyCircuitBreakerService(Settings settings, ClusterSettings clusterSettings) { super(settings); this.fielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), @@ -88,9 +88,9 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { registerBreaker(this.requestSettings); registerBreaker(this.fielddataSettings); - clusterSettingsService.addSettingsUpdateConsumer(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, this::setTotalCircuitBreakerLimit, this::validateTotalCircuitBreakerLimit); - clusterSettingsService.addSettingsUpdateConsumer(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 
this::setFieldDataBreakerLimit); - clusterSettingsService.addSettingsUpdateConsumer(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, this::setRequestBreakerLimit); + clusterSettings.addSettingsUpdateConsumer(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, this::setTotalCircuitBreakerLimit, this::validateTotalCircuitBreakerLimit); + clusterSettings.addSettingsUpdateConsumer(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, this::setFieldDataBreakerLimit); + clusterSettings.addSettingsUpdateConsumer(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, this::setRequestBreakerLimit); } private void setRequestBreakerLimit(ByteSizeValue newRequestMax, Double newRequestOverhead) { long newRequestLimitBytes = newRequestMax == null ? HierarchyCircuitBreakerService.this.requestSettings.getLimit() : newRequestMax.bytes(); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index eb1c643038f..0e72dedb3ad 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -23,13 +23,13 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; 
import java.io.Closeable; @@ -95,7 +95,7 @@ public class RecoverySettings extends AbstractComponent implements Closeable { @Inject - public RecoverySettings(Settings settings, ClusterSettingsService clusterSettingsService) { + public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { super(settings); this.fileChunkSize = INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING.get(settings); this.translogOps = INDICES_RECOVERY_TRANSLOG_OPS_SETTING.get(settings); @@ -130,18 +130,18 @@ public class RecoverySettings extends AbstractComponent implements Closeable { logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}], file_chunk_size [{}], translog_size [{}], translog_ops [{}], and compress [{}]", maxBytesPerSec, concurrentStreams, fileChunkSize, translogSize, translogOps, compress); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING, this::setFileChunkSize); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_TRANSLOG_OPS_SETTING, this::setTranslogOps); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_TRANSLOG_SIZE_SETTING, this::setTranslogSize); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_COMPRESS_SETTING, this::setCompress); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING, this::setConcurrentStreams); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING, this::setConcurrentSmallFileStreams); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork); - 
clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, this::setInternalActionLongTimeout); - clusterSettingsService.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING, this::setFileChunkSize); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_TRANSLOG_OPS_SETTING, this::setTranslogOps); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_TRANSLOG_SIZE_SETTING, this::setTranslogSize); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_COMPRESS_SETTING, this::setCompress); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING, this::setConcurrentStreams); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING, this::setConcurrentSmallFileStreams); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, this::setInternalActionLongTimeout); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout); } @Override diff --git a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java index 
47d18105d0f..454dc460274 100644 --- a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java +++ b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -50,7 +51,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.common.settings.ClusterSettingsService; import java.io.IOException; import java.util.ArrayList; @@ -78,7 +78,7 @@ public class IndicesTTLService extends AbstractLifecycleComponent imp private final ParseFieldMatcher parseFieldMatcher; @Inject - public SearchService(Settings settings, ClusterSettingsService clusterSettingsService, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool, + public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) { super(settings); @@ -187,7 +187,7 @@ public class SearchService extends AbstractLifecycleComponent imp this.indicesWarmer.addListener(new SearchWarmer()); defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings); - 
clusterSettingsService.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout); + clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout); } private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) { diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index c4049573b9b..14b2680d25c 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -49,7 +49,6 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRepository; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.threadpool.ThreadPool; @@ -125,12 +124,12 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis private final CopyOnWriteArrayList> listeners = new CopyOnWriteArrayList<>(); private final BlockingQueue updatedSnapshotStateQueue = ConcurrentCollections.newBlockingQueue(); - private final ClusterSettingsService clusterSettingsService; + private final ClusterSettings clusterSettings; @Inject public RestoreService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, TransportService transportService, AllocationService allocationService, MetaDataCreateIndexService createIndexService, ClusterSettings dynamicSettings, - MetaDataIndexUpgradeService metaDataIndexUpgradeService, ClusterSettingsService clusterSettingsService) { + MetaDataIndexUpgradeService metaDataIndexUpgradeService, ClusterSettings clusterSettings) { super(settings); this.clusterService 
= clusterService; this.repositoriesService = repositoriesService; @@ -141,7 +140,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis this.metaDataIndexUpgradeService = metaDataIndexUpgradeService; transportService.registerRequestHandler(UPDATE_RESTORE_ACTION_NAME, UpdateIndexShardRestoreStatusRequest::new, ThreadPool.Names.SAME, new UpdateRestoreStateRequestHandler()); clusterService.add(this); - this.clusterSettingsService = clusterSettingsService; + this.clusterSettings = clusterSettings; } /** @@ -392,7 +391,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis if (request.includeGlobalState()) { if (metaData.persistentSettings() != null) { Settings settings = metaData.persistentSettings(); - clusterSettingsService.dryRun(settings); + clusterSettings.dryRun(settings); mdBuilder.persistentSettings(settings); } if (metaData.templates() != null) { diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index f0fc5d86b00..862b048e2ec 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; @@ -37,7 +38,6 @@ import org.elasticsearch.common.util.concurrent.XRejectedExecutionHandler; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; -import 
org.elasticsearch.common.settings.ClusterSettingsService; import java.io.IOException; import java.util.*; @@ -250,9 +250,9 @@ public class ThreadPool extends AbstractComponent { this.estimatedTimeThread.start(); } - public void setNodeSettingsService(ClusterSettingsService clusterSettingsService) { + public void setNodeSettingsService(ClusterSettings clusterSettings) { if(settingsListenerIsSet.compareAndSet(false, true)) { - clusterSettingsService.addSettingsUpdateConsumer(THREADPOOL_GROUP_SETTING, this::updateSettings, (s) -> {validate(s.getAsGroups()); return true;}); + clusterSettings.addSettingsUpdateConsumer(THREADPOOL_GROUP_SETTING, this::updateSettings, (s) -> {validate(s.getAsGroups()); return true;}); } else { throw new IllegalStateException("the node settings listener was set more then once"); } diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index d0d5b5e230b..82802e99dfa 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; @@ -38,7 +39,6 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.settings.ClusterSettingsService; import 
org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -132,9 +132,9 @@ public class TransportService extends AbstractLifecycleComponent index = new AtomicReference<>(); AtomicReference shard = new AtomicReference<>(); ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); - ClusterSettingsService settingsService = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + ClusterSettings settingsService = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, shard::set); SettingsUpdater updater = new SettingsUpdater(settingsService); @@ -75,7 +73,7 @@ public class SettingsUpdaterTests extends ESTestCase { public void testAllOrNothing() { ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); - ClusterSettingsService settingsService = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + ClusterSettings settingsService = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); AtomicReference index = new AtomicReference<>(); AtomicReference shard = new AtomicReference<>(); settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); @@ -101,7 +99,7 @@ public class SettingsUpdaterTests extends ESTestCase { public void testClusterBlock() { ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); - ClusterSettingsService settingsService = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + ClusterSettings settingsService = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); AtomicReference index = new 
AtomicReference<>(); AtomicReference shard = new AtomicReference<>(); settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 69fb6cb1a61..e9627b10068 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -31,11 +31,10 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllo import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.inject.ModuleTestCase; -import org.elasticsearch.common.settings.ClusterSettingsService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexDynamicSettings; @@ -86,7 +85,7 @@ public class ClusterModuleTests extends ModuleTestCase { public void testRegisterClusterDynamicSetting() { ClusterModule module = new ClusterModule(Settings.EMPTY); module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster)); - assertInstanceBinding(module, ClusterSettingsService.class, service -> service.getClusterSettings().hasDynamicSetting("foo.bar")); + assertInstanceBinding(module, ClusterSettings.class, service -> service.hasDynamicSetting("foo.bar")); } public void testRegisterIndexDynamicSettingDuplicate() { diff --git 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index bc4ef8235d4..e622036e13b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESAllocationTestCase; import org.elasticsearch.test.gateway.NoopGatewayAllocator; import org.hamcrest.Matchers; @@ -283,7 +282,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.2); settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 0.3); settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 2.0); - ClusterSettingsService service = new ClusterSettingsService(settingsBuilder().build(), new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + ClusterSettings service = new ClusterSettings(settingsBuilder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings.build(), service); assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f)); assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f)); @@ -312,7 +311,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { public void testNoRebalanceOnPrimaryOverload() { Settings.Builder settings = settingsBuilder(); AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(), - new 
ClusterSettingsService(Settings.Builder.EMPTY_SETTINGS, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), getRandom()), new ShardsAllocators(settings.build(), + new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()), new ShardsAllocators(settings.build(), NoopGatewayAllocator.INSTANCE, new ShardsAllocator() { @Override diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 279687a004f..52e88ea3bc9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -46,7 +45,7 @@ import static org.hamcrest.CoreMatchers.equalTo; */ public class DiskThresholdDeciderUnitTests extends ESTestCase { public void testDynamicSettings() { - ClusterSettingsService nss = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); @@ -84,7 +83,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { } public void testCanAllocateUsesMaxAvailableSpace() { - ClusterSettingsService nss = 
new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); @@ -125,7 +124,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { } public void testCanRemainUsesLeastAvailableSpace() { - ClusterSettingsService nss = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); ImmutableOpenMap.Builder shardRoutingMap = ImmutableOpenMap.builder(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java index b0a49a93936..1bdc39036a3 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESAllocationTestCase; import java.util.EnumSet; @@ -162,8 +161,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), useClusterSetting ? 
Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3) .build(); - ClusterSettingsService clusterSettingsService = new ClusterSettingsService(build, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - AllocationService strategy = createAllocationService(build, clusterSettingsService, getRandom()); + ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationService strategy = createAllocationService(build, clusterSettings, getRandom()); Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build(); logger.info("Building initial routing table"); @@ -224,7 +223,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { .build(); } - clusterSettingsService.applySettings(clusterState.metaData().settings()); + clusterSettings.applySettings(clusterState.metaData().settings()); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat("expected 6 shards to be started 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6)); @@ -264,8 +263,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), useClusterSetting ? 
Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3) .build(); - ClusterSettingsService clusterSettingsService = new ClusterSettingsService(build, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - AllocationService strategy = createAllocationService(build, clusterSettingsService, getRandom()); + ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationService strategy = createAllocationService(build, clusterSettings, getRandom()); Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build(); logger.info("Building initial routing table"); @@ -315,7 +314,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).removeAllIndices() .put(IndexMetaData.builder(meta).settings(settingsBuilder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, randomBoolean() ? 
Rebalance.PRIMARIES : Rebalance.ALL).build()))).build(); } - clusterSettingsService.applySettings(clusterState.metaData().settings()); + clusterSettings.applySettings(clusterState.metaData().settings()); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat("expected 4 primaries to be started and 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); diff --git a/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java index c484df7d99d..bb9d23db1cb 100644 --- a/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java +++ b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import java.util.concurrent.atomic.AtomicBoolean; @@ -88,7 +87,7 @@ public class MemoryCircuitBreakerTests extends ESTestCase { final AtomicReference lastException = new AtomicReference<>(null); final AtomicReference breakerRef = new AtomicReference<>(null); - final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) { @Override public CircuitBreaker 
getBreaker(String name) { @@ -148,7 +147,7 @@ public class MemoryCircuitBreakerTests extends ESTestCase { final AtomicInteger parentTripped = new AtomicInteger(0); final AtomicReference breakerRef = new AtomicReference<>(null); - final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) { @Override public CircuitBreaker getBreaker(String name) { diff --git a/core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java deleted file mode 100644 index 28a639e8903..00000000000 --- a/core/src/test/java/org/elasticsearch/common/settings/ClusterSettingsTests.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.common.settings; - -import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; -import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashSet; - -/** - */ -public class ClusterSettingsTests extends ESTestCase { - - public void testGet() { - ClusterSettings settings = new ClusterSettings(); - Setting setting = settings.get("cluster.routing.allocation.require.value"); - assertEquals(setting, FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING); - - setting = settings.get("cluster.routing.allocation.total_shards_per_node"); - assertEquals(setting, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING); - } - - public void testIsDynamic(){ - ClusterSettings settings = new ClusterSettings(new HashSet<>(Arrays.asList(Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster), Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.Cluster)))); - assertFalse(settings.hasDynamicSetting("foo.bar.baz")); - assertTrue(settings.hasDynamicSetting("foo.bar")); - assertNotNull(settings.get("foo.bar.baz")); - } - - public void testDiff() throws IOException { - Setting foobarbaz = Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.Cluster); - Setting foobar = Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster); - ClusterSettings settings = new ClusterSettings(new HashSet<>(Arrays.asList(foobar, foobarbaz))); - Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.EMPTY); - assertEquals(diff.getAsMap().size(), 1); - assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(1)); - - diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.builder().put("foo.bar.baz", 17).build()); - assertEquals(diff.getAsMap().size(), 1); - assertEquals(diff.getAsInt("foo.bar.baz", null), 
Integer.valueOf(17)); - } -} diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsServiceTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java similarity index 65% rename from core/src/test/java/org/elasticsearch/common/settings/SettingsServiceTests.java rename to core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index ae6fd79aa58..c837bbb7d98 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -18,23 +18,22 @@ */ package org.elasticsearch.common.settings; +import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; import java.util.concurrent.atomic.AtomicInteger; -public class SettingsServiceTests extends ESTestCase { +public class ScopedSettingsTests extends ESTestCase { public void testAddConsumer() { Setting testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster); Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.Cluster); - SettingsService service = new SettingsService(Settings.EMPTY) { - @Override - protected Setting getSetting(String key) { - if (key.equals(testSetting.getKey())) { - return testSetting; - } - return null; - } - }; + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, Collections.singleton(testSetting)); AtomicInteger consumer = new AtomicInteger(); service.addSettingsUpdateConsumer(testSetting, consumer::set); @@ -46,7 +45,6 @@ public class SettingsServiceTests extends ESTestCase { assertEquals("Setting is not registered for key [foo.bar.baz]", ex.getMessage()); } - try { 
service.addSettingsUpdateConsumer(testSetting, testSetting2, (a, b) -> {consumer.set(a); consumer2.set(b);}); fail("setting not registered"); @@ -63,17 +61,7 @@ public class SettingsServiceTests extends ESTestCase { public void testApply() { Setting testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster); Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.Cluster); - SettingsService service = new SettingsService(Settings.EMPTY) { - @Override - protected Setting getSetting(String key) { - if (key.equals(testSetting.getKey())) { - return testSetting; - } else if (key.equals(testSetting2.getKey())) { - return testSetting2; - } - return null; - } - }; + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(testSetting, testSetting2))); AtomicInteger consumer = new AtomicInteger(); service.addSettingsUpdateConsumer(testSetting, consumer::set); @@ -121,4 +109,33 @@ public class SettingsServiceTests extends ESTestCase { assertEquals(2, aC.get()); assertEquals(15, bC.get()); } + + public void testGet() { + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + Setting setting = settings.get("cluster.routing.allocation.require.value"); + assertEquals(setting, FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING); + + setting = settings.get("cluster.routing.allocation.total_shards_per_node"); + assertEquals(setting, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING); + } + + public void testIsDynamic(){ + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster), Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.Cluster)))); + assertFalse(settings.hasDynamicSetting("foo.bar.baz")); + assertTrue(settings.hasDynamicSetting("foo.bar")); + assertNotNull(settings.get("foo.bar.baz")); + } + + public void 
testDiff() throws IOException { + Setting foobarbaz = Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.Cluster); + Setting foobar = Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster); + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(foobar, foobarbaz))); + Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.EMPTY); + assertEquals(diff.getAsMap().size(), 1); + assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(1)); + + diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.builder().put("foo.bar.baz", 17).build()); + assertEquals(diff.getAsMap().size(), 1); + assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(17)); + } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 5787ae59f56..a61d91c8f25 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -42,7 +42,7 @@ public class SettingTests extends ESTestCase { ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); assertEquals(byteSizeValue.bytes(), 1024); AtomicReference value = new AtomicReference<>(null); - ClusterSettingsService.SettingUpdater settingUpdater = byteSizeValueSetting.newUpdater(value::set, logger, Settings.EMPTY); + ClusterSettings.SettingUpdater settingUpdater = byteSizeValueSetting.newUpdater(value::set, logger, Settings.EMPTY); try { settingUpdater.prepareApply(Settings.builder().put("a.byte.size", 12).build()); fail("no unit"); @@ -58,7 +58,7 @@ public class SettingTests extends ESTestCase { public void testSimpleUpdate() { Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster); AtomicReference atomicBoolean = new AtomicReference<>(null); - ClusterSettingsService.SettingUpdater 
settingUpdater = booleanSetting.newUpdater(atomicBoolean::set, logger, Settings.EMPTY); + ClusterSettings.SettingUpdater settingUpdater = booleanSetting.newUpdater(atomicBoolean::set, logger, Settings.EMPTY); Settings build = Settings.builder().put("foo.bar", false).build(); settingUpdater.prepareApply(build); assertNull(atomicBoolean.get()); @@ -96,7 +96,7 @@ public class SettingTests extends ESTestCase { Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster); AtomicReference ab1 = new AtomicReference<>(null); AtomicReference ab2 = new AtomicReference<>(null); - ClusterSettingsService.SettingUpdater settingUpdater = booleanSetting.newUpdater(ab1::set, logger, Settings.EMPTY); + ClusterSettings.SettingUpdater settingUpdater = booleanSetting.newUpdater(ab1::set, logger, Settings.EMPTY); settingUpdater.prepareApply(Settings.builder().put("foo.bar", true).build()); assertNull(ab1.get()); assertNull(ab2.get()); @@ -124,7 +124,7 @@ public class SettingTests extends ESTestCase { assertFalse(setting.isGroupSetting()); ref.set(setting.get(Settings.EMPTY)); ComplexType type = ref.get(); - ClusterSettingsService.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY); + ClusterSettings.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY); assertFalse(settingUpdater.prepareApply(Settings.EMPTY)); settingUpdater.apply(); assertSame("no update - type has not changed", type, ref.get()); @@ -147,7 +147,7 @@ public class SettingTests extends ESTestCase { Setting integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.Cluster); assertFalse(integerSetting.isGroupSetting()); AtomicReference ref = new AtomicReference<>(null); - ClusterSettingsService.SettingUpdater settingUpdater = integerSetting.newUpdater(ref::set, logger, Settings.EMPTY); + ClusterSettings.SettingUpdater settingUpdater = integerSetting.newUpdater(ref::set, logger, Settings.EMPTY); 
assertNull(ref.get()); assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.int.bar", "2").build())); settingUpdater.rollback(); @@ -169,7 +169,7 @@ public class SettingTests extends ESTestCase { AtomicReference ref = new AtomicReference<>(null); Setting setting = Setting.groupSetting("foo.bar.", true, Setting.Scope.Cluster); assertTrue(setting.isGroupSetting()); - ClusterSettingsService.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY); + ClusterSettings.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY); assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").put("foo.bar.3.value", "3").build())); settingUpdater.apply(); @@ -209,7 +209,7 @@ public class SettingTests extends ESTestCase { assertTrue(setting.match("foo.bar.baz")); assertFalse(setting.match("foo.baz.bar")); - ClusterSettingsService.SettingUpdater predicateSettingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY, (s) -> false); + ClusterSettings.SettingUpdater predicateSettingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY, (s) -> false); try { predicateSettingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build()); fail("not accepted"); @@ -243,7 +243,7 @@ public class SettingTests extends ESTestCase { Composite c = new Composite(); Setting a = Setting.intSetting("foo.int.bar.a", 1, true, Setting.Scope.Cluster); Setting b = Setting.intSetting("foo.int.bar.b", 1, true, Setting.Scope.Cluster); - ClusterSettingsService.SettingUpdater settingUpdater = Setting.compoundUpdater(c::set, a, b, logger, Settings.EMPTY); + ClusterSettings.SettingUpdater settingUpdater = Setting.compoundUpdater(c::set, a, b, logger, Settings.EMPTY); assertFalse(settingUpdater.prepareApply(Settings.EMPTY)); settingUpdater.apply(); assertNull(c.a); diff --git 
a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index f3783fa6d12..184de7f385e 100644 --- a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESSingleNodeTestCase; import org.junit.Before; @@ -339,7 +338,7 @@ public class BigArraysTests extends ESSingleNodeTestCase { Settings.builder() .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), size - 1, ByteSizeUnit.BYTES) .build(), - new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); try { @@ -359,7 +358,7 @@ public class BigArraysTests extends ESSingleNodeTestCase { Settings.builder() .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), maxSize, ByteSizeUnit.BYTES) .build(), - new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); final int size = scaledRandomIntBetween(1, 20); diff --git 
a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 2dac067fed3..1f6f0bdc0e3 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.membership.MembershipAction; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -68,7 +67,7 @@ public class NodeJoinControllerTests extends ESTestCase { // make sure we have a master clusterService.setState(ClusterState.builder(clusterService.state()).nodes(DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.id()))); nodeJoinController = new NodeJoinController(clusterService, new NoopRoutingService(Settings.EMPTY), - new DiscoverySettings(Settings.EMPTY, new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))), Settings.EMPTY); + new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), Settings.EMPTY); } public void testSimpleJoinAccumulation() throws InterruptedException, ExecutionException { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index 3cb5316cacc..e5b3337683b 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ 
b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; @@ -238,7 +237,7 @@ public class PublishClusterStateActionTests extends ESTestCase { protected MockPublishAction buildPublishClusterStateAction(Settings settings, MockTransportService transportService, DiscoveryNodesProvider nodesProvider, PublishClusterStateAction.NewPendingClusterStateListener listener) { - DiscoverySettings discoverySettings = new DiscoverySettings(settings, new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); + DiscoverySettings discoverySettings = new DiscoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); return new MockPublishAction(settings, transportService, nodesProvider, listener, discoverySettings, ClusterName.DEFAULT); } diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java index 38da1ac135c..212d7ecbb7b 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.breaker.CircuitBreakerService; import 
org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.equalTo; @@ -67,7 +66,7 @@ public class CircuitBreakerUnitTests extends ESTestCase { } public void testRegisterCustomBreaker() throws Exception { - CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); + CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); String customName = "custom"; BreakerSettings settings = new BreakerSettings(customName, 20, 1.0); service.registerBreaker(settings); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index ca70fc1fe2f..75a2b14060b 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.CorruptionUtils; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; @@ -61,7 +60,7 @@ import java.util.concurrent.atomic.AtomicBoolean; public class RecoverySourceHandlerTests extends ESTestCase { private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings(new Index("index"), Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, 
org.elasticsearch.Version.CURRENT).build()); private final ShardId shardId = new ShardId(INDEX_SETTINGS.getIndex(), 1); - private final ClusterSettingsService service = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + private final ClusterSettings service = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); public void testSendFiles() throws Throwable { Settings settings = Settings.builder().put("indices.recovery.concurrent_streams", 1). diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index 47d82a36088..ddaa53fce7b 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.threadpool; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool.Names; @@ -92,10 +91,10 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { ThreadPool threadPool = null; try { threadPool = new ThreadPool(settingsBuilder().put("name", "testUpdateSettingsCanNotChangeThreadPoolType").build()); - ClusterSettingsService clusterSettingsService = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - threadPool.setNodeSettingsService(clusterSettingsService); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setNodeSettingsService(clusterSettings); - clusterSettingsService.applySettings( + 
clusterSettings.applySettings( settingsBuilder() .put("threadpool." + threadPoolName + ".type", invalidThreadPoolType.getType()) .build() @@ -117,13 +116,13 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { Settings nodeSettings = Settings.settingsBuilder() .put("name", "testCachedExecutorType").build(); threadPool = new ThreadPool(nodeSettings); - ClusterSettingsService clusterSettingsService = new ClusterSettingsService(nodeSettings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - threadPool.setNodeSettingsService(clusterSettingsService); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setNodeSettingsService(clusterSettings); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - Settings settings = clusterSettingsService.applySettings(settingsBuilder() + Settings settings = clusterSettings.applySettings(settingsBuilder() .put("threadpool." + threadPoolName + ".keep_alive", "10m") .build()); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED); @@ -139,7 +138,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change keep alive Executor oldExecutor = threadPool.executor(threadPoolName); - settings = clusterSettingsService.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." 
+ threadPoolName + ".keep_alive", "1m").build()); // Make sure keep alive value changed assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L)); assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L)); @@ -148,7 +147,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor)); // Set the same keep alive - settings = clusterSettingsService.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); // Make sure keep alive value didn't change assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L)); assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L)); @@ -168,10 +167,10 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { Settings nodeSettings = Settings.settingsBuilder() .put("name", "testFixedExecutorType").build(); threadPool = new ThreadPool(nodeSettings); - ClusterSettingsService clusterSettingsService = new ClusterSettingsService(nodeSettings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - threadPool.setNodeSettingsService(clusterSettingsService); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setNodeSettingsService(clusterSettings); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - Settings settings = clusterSettingsService.applySettings(settingsBuilder() + Settings settings = clusterSettings.applySettings(settingsBuilder() .put("threadpool." 
+ threadPoolName + ".size", "15") .build()); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); @@ -184,7 +183,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L)); // Put old type back - settings = clusterSettingsService.applySettings(Settings.EMPTY); + settings = clusterSettings.applySettings(Settings.EMPTY); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); // Make sure keep alive value is not used assertThat(info(threadPool, threadPoolName).getKeepAlive(), nullValue()); @@ -197,7 +196,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change size Executor oldExecutor = threadPool.executor(threadPoolName); - settings = clusterSettingsService.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".size", "10").build()); + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".size", "10").build()); // Make sure size values changed assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10)); assertThat(info(threadPool, threadPoolName).getMin(), equalTo(10)); @@ -208,7 +207,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor)); // Change queue capacity - settings = clusterSettingsService.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".queue", "500") + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".queue", "500") .build()); } finally { terminateThreadPoolIfNeeded(threadPool); @@ -223,8 +222,8 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { .put("threadpool." 
+ threadPoolName + ".size", 10) .put("name", "testScalingExecutorType").build(); threadPool = new ThreadPool(nodeSettings); - ClusterSettingsService clusterSettingsService = new ClusterSettingsService(nodeSettings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - threadPool.setNodeSettingsService(clusterSettingsService); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setNodeSettingsService(clusterSettings); assertThat(info(threadPool, threadPoolName).getMin(), equalTo(1)); assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10)); assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(5L)); @@ -233,7 +232,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change settings that doesn't require pool replacement Executor oldExecutor = threadPool.executor(threadPoolName); - clusterSettingsService.applySettings(settingsBuilder() + clusterSettings.applySettings(settingsBuilder() .put("threadpool." + threadPoolName + ".keep_alive", "10m") .put("threadpool." + threadPoolName + ".min", "2") .put("threadpool." + threadPoolName + ".size", "15") @@ -261,8 +260,8 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { .put("threadpool." 
+ threadPoolName + ".queue_size", 1000) .put("name", "testCachedExecutorType").build(); threadPool = new ThreadPool(nodeSettings); - ClusterSettingsService clusterSettingsService = new ClusterSettingsService(nodeSettings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - threadPool.setNodeSettingsService(clusterSettingsService); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setNodeSettingsService(clusterSettings); assertEquals(info(threadPool, threadPoolName).getQueueSize().getSingles(), 1000L); final CountDownLatch latch = new CountDownLatch(1); @@ -276,7 +275,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { } } ); - clusterSettingsService.applySettings(settingsBuilder().put("threadpool." + threadPoolName + ".queue_size", 2000).build()); + clusterSettings.applySettings(settingsBuilder().put("threadpool." + threadPoolName + ".queue_size", 2000).build()); assertThat(threadPool.executor(threadPoolName), not(sameInstance(oldExecutor))); assertThat(oldExecutor.isShutdown(), equalTo(true)); assertThat(oldExecutor.isTerminating(), equalTo(true)); @@ -298,8 +297,8 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { .put("threadpool.my_pool2.queue_size", "1") .put("name", "testCustomThreadPool").build(); threadPool = new ThreadPool(nodeSettings); - ClusterSettingsService clusterSettingsService = new ClusterSettingsService(nodeSettings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - threadPool.setNodeSettingsService(clusterSettingsService); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setNodeSettingsService(clusterSettings); ThreadPoolInfo groups = threadPool.info(); boolean foundPool1 = false; boolean foundPool2 = false; @@ -331,7 +330,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { Settings settings = Settings.builder() 
.put("threadpool.my_pool2.size", "10") .build(); - clusterSettingsService.applySettings(settings); + clusterSettings.applySettings(settings); groups = threadPool.info(); foundPool1 = false; diff --git a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index f7a9c221f7c..6599412834d 100644 --- a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; @@ -652,7 +651,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { includeSettings = "test"; excludeSettings = "DOESN'T_MATCH"; } - ClusterSettingsService service = new ClusterSettingsService(Settings.EMPTY, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + ClusterSettings service = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); serviceA.setDynamicSettings(service); service.applySettings(Settings.builder() .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), includeSettings, TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), excludeSettings) diff --git a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java index ca3e4a21604..02fffda722a 100644 --- 
a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java +++ b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.netty.NettyTransport; @@ -65,7 +64,7 @@ public class NettySizeHeaderFrameDecoderTests extends ESTestCase { @Before public void startThreadPool() { threadPool = new ThreadPool(settings); - threadPool.setNodeSettingsService(new ClusterSettingsService(settings, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); + threadPool.setNodeSettingsService(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); NetworkService networkService = new NetworkService(settings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, Version.CURRENT, new NamedWriteableRegistry()); diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index 59b57fca7de..15e49d7a60d 100755 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -27,7 +27,6 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import 
org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java index e0d15a584bd..3fd3f1948e8 100755 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java @@ -27,7 +27,6 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java index e004230b735..c1a00979fcd 100755 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java @@ -27,7 +27,6 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git 
a/test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index b105d269163..3e9b0c09cb2 100644 --- a/test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -29,11 +29,11 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.monitor.fs.FsInfo; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.threadpool.ThreadPool; @@ -78,11 +78,11 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService { } @Inject - public MockInternalClusterInfoService(Settings settings, ClusterSettingsService clusterSettingsService, + public MockInternalClusterInfoService(Settings settings, ClusterSettings clusterSettings, TransportNodesStatsAction transportNodesStatsAction, TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService, ThreadPool threadPool) { - super(settings, clusterSettingsService, transportNodesStatsAction, transportIndicesStatsAction, clusterService, threadPool); + super(settings, clusterSettings, transportNodesStatsAction, transportIndicesStatsAction, clusterService, threadPool); this.clusterName = ClusterName.clusterNameFromSettings(settings); stats[0] = makeStats("node_t1", new DiskUsage("node_t1", "n1", "/dev/null", 100, 100)); stats[1] = makeStats("node_t2", new 
DiskUsage("node_t2", "n2", "/dev/null", 100, 100)); diff --git a/test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java index 09a0ab9b6d3..98b5181636d 100644 --- a/test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java +++ b/test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java @@ -22,12 +22,12 @@ package org.elasticsearch.search; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.cache.request.IndicesRequestCache; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.dfs.DfsPhase; @@ -67,13 +67,13 @@ public class MockSearchService extends SearchService { } @Inject - public MockSearchService(Settings settings, ClusterSettingsService clusterSettingsService, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, + public MockSearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) { - super(settings, clusterSettingsService, clusterService, indicesService, indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase, + super(settings, 
clusterSettings, clusterService, indicesService, indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase, queryPhase, fetchPhase, indicesQueryCache); } - + @Override protected void putContext(SearchContext context) { super.putContext(context); diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java index e4d3a3c42a5..091ff23cb5a 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java @@ -44,7 +44,6 @@ import org.elasticsearch.gateway.AsyncShardFetch; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.ReplicaShardAllocator; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; -import org.elasticsearch.common.settings.ClusterSettingsService; import org.elasticsearch.test.gateway.NoopGatewayAllocator; import java.lang.reflect.Constructor; @@ -68,37 +67,37 @@ public abstract class ESAllocationTestCase extends ESTestCase { } public static MockAllocationService createAllocationService(Settings settings, Random random) { - return createAllocationService(settings, new ClusterSettingsService(Settings.Builder.EMPTY_SETTINGS, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), random); + return createAllocationService(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), random); } - public static MockAllocationService createAllocationService(Settings settings, ClusterSettingsService clusterSettingsService, Random random) { + public static MockAllocationService createAllocationService(Settings settings, ClusterSettings clusterSettings, Random random) { return new MockAllocationService(settings, - randomAllocationDeciders(settings, clusterSettingsService, random), + randomAllocationDeciders(settings, 
clusterSettings, random), new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), EmptyClusterInfoService.INSTANCE); } public static MockAllocationService createAllocationService(Settings settings, ClusterInfoService clusterInfoService) { return new MockAllocationService(settings, - randomAllocationDeciders(settings, new ClusterSettingsService(Settings.Builder.EMPTY_SETTINGS, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), getRandom()), + randomAllocationDeciders(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()), new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), clusterInfoService); } public static MockAllocationService createAllocationService(Settings settings, GatewayAllocator allocator) { return new MockAllocationService(settings, - randomAllocationDeciders(settings, new ClusterSettingsService(Settings.Builder.EMPTY_SETTINGS, new ClusterSettings(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), getRandom()), + randomAllocationDeciders(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()), new ShardsAllocators(settings, allocator), EmptyClusterInfoService.INSTANCE); } - public static AllocationDeciders randomAllocationDeciders(Settings settings, ClusterSettingsService clusterSettingsService, Random random) { + public static AllocationDeciders randomAllocationDeciders(Settings settings, ClusterSettings clusterSettings, Random random) { final List> defaultAllocationDeciders = ClusterModule.DEFAULT_ALLOCATION_DECIDERS; final List list = new ArrayList<>(); for (Class deciderClass : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) { try { try { - Constructor constructor = deciderClass.getConstructor(Settings.class, ClusterSettingsService.class); - list.add(constructor.newInstance(settings, clusterSettingsService)); + Constructor constructor = deciderClass.getConstructor(Settings.class, ClusterSettings.class); 
+ list.add(constructor.newInstance(settings, clusterSettings)); } catch (NoSuchMethodException e) { Constructor constructor = null; constructor = deciderClass.getConstructor(Settings.class); From d4e7bd2cc3ef7f5cc3adc7c9e395551720e3de26 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Dec 2015 10:14:33 +0100 Subject: [PATCH 010/167] predicate -> consumer --- .../settings/AbstractScopedSettings.java | 8 ++++--- .../common/settings/Setting.java | 22 +++++++++++-------- .../discovery/zen/ZenDiscovery.java | 1 - .../elasticsearch/threadpool/ThreadPool.java | 2 +- .../common/settings/ScopedSettingsTests.java | 2 +- .../common/settings/SettingTests.java | 8 +------ 6 files changed, 21 insertions(+), 22 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 66c966dde88..e5dedbba7f1 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -155,7 +155,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { * Note: Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically. *

*/ - public synchronized void addSettingsUpdateConsumer(Setting setting, Consumer consumer, Predicate predicate) { + public synchronized void addSettingsUpdateConsumer(Setting setting, Consumer consumer, Consumer predicate) { if (setting != get(setting.getKey())) { throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); } @@ -167,6 +167,8 @@ public abstract class AbstractScopedSettings extends AbstractComponent { *

* Note: Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically. *

+ * This method registers a compound updater that is useful if two settings are depending on each other. The consumer is always provided + * with both values even if only one of the two changes. */ public synchronized void addSettingsUpdateConsumer(Setting
a, Setting b, BiConsumer consumer) { if (a != get(a.getKey())) { @@ -185,7 +187,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { *

*/ public synchronized void addSettingsUpdateConsumer(Setting setting, Consumer consumer) { - addSettingsUpdateConsumer(setting, consumer, (s) -> true); + addSettingsUpdateConsumer(setting, consumer, (s) -> {}); } /** @@ -238,7 +240,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { } /** - * Returns a settings object that contains all clustersettings that are not + * Returns a settings object that contains all settings that are not * already set in the given source. The diff contains either the default value for each * setting or the settings value in the given default settings. */ diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index fda10ea226b..8b3227d7b9f 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -150,10 +150,10 @@ public class Setting extends ToXContentToBytes { } AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings) { - return newUpdater(consumer, logger, settings, (s) -> true); + return newUpdater(consumer, logger, settings, (s) -> {}); } - AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Predicate accept) { + AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Consumer accept) { if (isDynamic()) { return new Updater(consumer, logger, settings, accept); } else { @@ -207,13 +207,13 @@ public class Setting extends ToXContentToBytes { private class Updater implements AbstractScopedSettings.SettingUpdater { private final Consumer consumer; private final ESLogger logger; - private final Predicate accept; + private final Consumer accept; private String value; private boolean commitPending; private String pendingValue; private T valueInstance; - public Updater(Consumer 
consumer, ESLogger logger, Settings settings, Predicate accept) { + public Updater(Consumer consumer, ESLogger logger, Settings settings, Consumer accept) { this.consumer = consumer; this.logger = logger; value = getRaw(settings); @@ -228,8 +228,10 @@ public class Setting extends ToXContentToBytes { } if (value.equals(newValue) == false) { T inst = get(settings); - if (accept.test(inst) == false) { - throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + value + "] to [" + getRaw(settings) + "]"); + try { + accept.accept(inst); + } catch (Exception | AssertionError e) { + throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + value + "] to [" + getRaw(settings) + "]", e); } pendingValue = newValue; valueInstance = inst; @@ -325,7 +327,7 @@ public class Setting extends ToXContentToBytes { } @Override - public AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Predicate accept) { + public AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Consumer accept) { if (isDynamic() == false) { throw new IllegalStateException("setting [" + getKey() + "] is not dynamic"); } @@ -338,8 +340,10 @@ public class Setting extends ToXContentToBytes { public boolean prepareApply(Settings settings) { Settings currentSettings = get(settings); if (currentSettings.equals(committedSettings) == false) { - if (accept.test(currentSettings) == false) { - throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + committedSettings.getAsMap() + "] to [" + currentSettings.getAsMap() + "]"); + try { + accept.accept(currentSettings); + } catch (Exception | AssertionError e) { + throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + committedSettings.getAsMap() + "] to [" + currentSettings.getAsMap() + "]", e); } pendingSettings = currentSettings; return true; diff --git 
a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 2c230d22599..441f6403d1d 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -178,7 +178,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen if (value > masterNodes) { throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]"); } - return true; }); clusterSettings.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoingOnMasterGone); diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 862b048e2ec..870ae387907 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -252,7 +252,7 @@ public class ThreadPool extends AbstractComponent { public void setNodeSettingsService(ClusterSettings clusterSettings) { if(settingsListenerIsSet.compareAndSet(false, true)) { - clusterSettings.addSettingsUpdateConsumer(THREADPOOL_GROUP_SETTING, this::updateSettings, (s) -> {validate(s.getAsGroups()); return true;}); + clusterSettings.addSettingsUpdateConsumer(THREADPOOL_GROUP_SETTING, this::updateSettings, (s) -> validate(s.getAsGroups())); } else { throw new IllegalStateException("the node settings listener was set more then once"); } diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index c837bbb7d98..bfc91bc92a3 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -66,7 +66,7 @@ public class ScopedSettingsTests extends ESTestCase { AtomicInteger consumer = new AtomicInteger(); service.addSettingsUpdateConsumer(testSetting, consumer::set); AtomicInteger consumer2 = new AtomicInteger(); - service.addSettingsUpdateConsumer(testSetting2, consumer2::set, (s) -> s > 0); + service.addSettingsUpdateConsumer(testSetting2, consumer2::set, (s) -> assertTrue(s > 0)); AtomicInteger aC = new AtomicInteger(); AtomicInteger bC = new AtomicInteger(); diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index a61d91c8f25..5f6f2b3615c 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -209,7 +209,7 @@ public class SettingTests extends ESTestCase { assertTrue(setting.match("foo.bar.baz")); assertFalse(setting.match("foo.baz.bar")); - ClusterSettings.SettingUpdater predicateSettingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY, (s) -> false); + ClusterSettings.SettingUpdater predicateSettingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY, (s) -> assertFalse(true)); try { predicateSettingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build()); fail("not accepted"); @@ -273,10 +273,4 @@ public class SettingTests extends ESTestCase { assertEquals(1, c.b.intValue()); } - - - - - - } From 3d946871e8aa6c563db0195dcced8ebec5420660 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Dec 2015 10:30:18 +0100 Subject: [PATCH 011/167] move setting registration into settings module - this stuff needs to be present for transport clients as well. 
--- .../elasticsearch/cluster/ClusterModule.java | 24 ----------------- .../settings/AbstractScopedSettings.java | 6 ++--- .../common/settings/SettingsModule.java | 27 ++++++++++++++++++- .../cluster/ClusterModuleTests.java | 10 +++---- 4 files changed, 33 insertions(+), 34 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 7505926868a..ca4de192e14 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -108,7 +108,6 @@ public class ClusterModule extends AbstractModule { SnapshotInProgressAllocationDecider.class)); private final Settings settings; - private final Map> clusterDynamicSettings = new HashMap<>(); private final DynamicSettings.Builder indexDynamicSettings = new DynamicSettings.Builder(); private final ExtensionPoint.SelectedType shardsAllocators = new ExtensionPoint.SelectedType<>("shards_allocator", ShardsAllocator.class); private final ExtensionPoint.ClassSet allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class); @@ -120,7 +119,6 @@ public class ClusterModule extends AbstractModule { public ClusterModule(Settings settings) { this.settings = settings; - registerBuiltinClusterSettings(); registerBuiltinIndexSettings(); for (Class decider : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) { @@ -130,12 +128,6 @@ public class ClusterModule extends AbstractModule { registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class); } - private void registerBuiltinClusterSettings() { - for (Setting setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) { - registerSetting(setting); - } - } - private void registerBuiltinIndexSettings() { registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); 
registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE, Validator.EMPTY); @@ -204,18 +196,6 @@ public class ClusterModule extends AbstractModule { indexDynamicSettings.addSetting(setting, validator); } - public void registerSetting(Setting setting) { - switch (setting.getScope()) { - case Cluster: - if (clusterDynamicSettings.containsKey(setting.getKey())) { - throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); - } - clusterDynamicSettings.put(setting.getKey(), setting); - break; - case Index: - throw new UnsupportedOperationException("not yet implemented"); - } - } public void registerAllocationDecider(Class allocationDecider) { allocationDeciders.registerExtension(allocationDecider); @@ -261,9 +241,5 @@ public class ClusterModule extends AbstractModule { bind(NodeIndexDeletedAction.class).asEagerSingleton(); bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); - final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(clusterDynamicSettings.values())); - bind(ClusterSettings.class).toInstance(clusterSettings); - - } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index e5dedbba7f1..af82f97bf0c 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -21,13 +21,11 @@ package org.elasticsearch.common.settings; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLoggerFactory; import java.util.*; import java.util.function.BiConsumer; import java.util.function.Consumer; -import java.util.function.Predicate; /** * A basic setting service 
that can be used for per-index and per-cluster settings. @@ -152,7 +150,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { /** * Adds a settings consumer with a predicate that is only evaluated at update time. *

- * Note: Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically. + * Note: Only settings registered in {@link SettingsModule} can be changed dynamically. *

*/ public synchronized void addSettingsUpdateConsumer(Setting setting, Consumer consumer, Consumer predicate) { @@ -165,7 +163,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { /** * Adds a settings consumer that accepts the values for two settings. The consumer if only notified if one or both settings change. *

- * Note: Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically. + * Note: Only settings registered in {@link SettingsModule} can be changed dynamically. *

* This method registers a compound updater that is useful if two settings are depending on each other. The consumer is always provided * with both values even if only one of the two changes. diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 2ae4799d9f3..647d9e02963 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -21,6 +21,10 @@ package org.elasticsearch.common.settings; import org.elasticsearch.common.inject.AbstractModule; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; + /** * A module that binds the provided settings to the {@link Settings} interface. * @@ -30,15 +34,36 @@ public class SettingsModule extends AbstractModule { private final Settings settings; private final SettingsFilter settingsFilter; + private final Map> clusterDynamicSettings = new HashMap<>(); + public SettingsModule(Settings settings, SettingsFilter settingsFilter) { this.settings = settings; this.settingsFilter = settingsFilter; + for (Setting setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) { + registerSetting(setting); + } } @Override protected void configure() { bind(Settings.class).toInstance(settings); bind(SettingsFilter.class).toInstance(settingsFilter); + final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(clusterDynamicSettings.values())); + bind(ClusterSettings.class).toInstance(clusterSettings); } -} \ No newline at end of file + + public void registerSetting(Setting setting) { + switch (setting.getScope()) { + case Cluster: + if (clusterDynamicSettings.containsKey(setting.getKey())) { + throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); + } + clusterDynamicSettings.put(setting.getKey(), setting); + break; + case Index: + throw new 
UnsupportedOperationException("not yet implemented"); + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index e9627b10068..2b77c3c7514 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -34,9 +34,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDeci import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.inject.ModuleTestCase; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.*; import org.elasticsearch.index.settings.IndexDynamicSettings; public class ClusterModuleTests extends ModuleTestCase { @@ -74,7 +72,8 @@ public class ClusterModuleTests extends ModuleTestCase { } public void testRegisterClusterDynamicSettingDuplicate() { - ClusterModule module = new ClusterModule(Settings.EMPTY); + final SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY); + SettingsModule module = new SettingsModule(Settings.EMPTY, settingsFilter); try { module.registerSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING); } catch (IllegalArgumentException e) { @@ -83,7 +82,8 @@ public class ClusterModuleTests extends ModuleTestCase { } public void testRegisterClusterDynamicSetting() { - ClusterModule module = new ClusterModule(Settings.EMPTY); + final SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY); + SettingsModule module = new SettingsModule(Settings.EMPTY, settingsFilter); module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster)); assertInstanceBinding(module, ClusterSettings.class, service -> 
service.hasDynamicSetting("foo.bar")); } From 83ef665cbac54c19668246ed734a7883717fe885 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Dec 2015 10:36:25 +0100 Subject: [PATCH 012/167] fix tests --- .../routing/allocation/allocator/BalancedShardsAllocator.java | 2 +- .../threadpool/UpdateThreadPoolSettingsTests.java | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index f52b3228c47..1388eddf76f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -94,7 +94,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } public void setWeightFunction(float indexBalance, float shardBalanceFactor) { - weightFunction = new WeightFunction(indexBalance, weightFunction.shardBalance); + weightFunction = new WeightFunction(indexBalance, shardBalanceFactor); } diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index ddaa53fce7b..07f34071442 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -101,8 +101,9 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { ); fail("expected IllegalArgumentException"); } catch (IllegalArgumentException e) { + assertEquals("illegal value can't update [threadpool.] 
from [{}] to [{" + threadPoolName + ".type=" + invalidThreadPoolType.getType() + "}]", e.getMessage()); assertThat( - e.getMessage(), + e.getCause().getMessage(), is("setting threadpool." + threadPoolName + ".type to " + invalidThreadPoolType.getType() + " is not permitted; must be " + validThreadPoolType.getType())); } finally { terminateThreadPoolIfNeeded(threadPool); From fd79e40e65861e85c500bf7bfb956af31d1d40b6 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Dec 2015 11:14:35 +0100 Subject: [PATCH 013/167] fix expcetion msg comparison --- .../snapshots/DedicatedClusterSnapshotRestoreIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 2c38c4c74c1..9133828c070 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -160,7 +160,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).execute().actionGet(); fail("can't restore minimum master nodes"); } catch (IllegalArgumentException ex) { - assertEquals("cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [1]", ex.getMessage()); + assertEquals("illegal value can't update [discovery.zen.minimum_master_nodes] from [1] to [2]", ex.getMessage()); + assertEquals("cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [1]", ex.getCause().getMessage()); } logger.info("--> ensure that zen discovery minimum master nodes wasn't restored"); 
assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() From 85a1b54867a81c334b29485e686cd4a086357a00 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Dec 2015 11:41:14 +0100 Subject: [PATCH 014/167] fix compilation --- .../org/elasticsearch/discovery/azure/AzureDiscovery.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index 15e49d7a60d..89d6d17298f 100755 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -22,6 +22,7 @@ package org.elasticsearch.discovery.azure; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -39,9 +40,9 @@ public class AzureDiscovery extends ZenDiscovery { @Inject public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, ClusterSettingsService clusterSettingsService, ZenPingService pingService, + ClusterService clusterService, ClusterSettings clusterSettings, ZenPingService pingService, DiscoverySettings discoverySettings, ElectMasterService electMasterService) { - super(settings, clusterName, threadPool, transportService, clusterService, clusterSettingsService, + super(settings, clusterName, threadPool, transportService, clusterService, clusterSettings, pingService, electMasterService, discoverySettings); } } 
From f0add6239e8be968566df4ab55fe8a2ea7505bbd Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Dec 2015 11:57:27 +0100 Subject: [PATCH 015/167] make enum names all uppercase --- .../close/TransportCloseIndexAction.java | 2 +- .../action/support/DestructiveOperations.java | 2 +- .../cluster/InternalClusterInfoService.java | 4 +-- .../action/index/MappingUpdatedAction.java | 2 +- .../cluster/metadata/MetaData.java | 3 +- .../allocator/BalancedShardsAllocator.java | 6 ++-- .../decider/AwarenessAllocationDecider.java | 4 +-- .../ClusterRebalanceAllocationDecider.java | 2 +- .../ConcurrentRebalanceAllocationDecider.java | 2 +- .../decider/DiskThresholdDecider.java | 10 +++--- .../decider/EnableAllocationDecider.java | 4 +-- .../decider/FilterAllocationDecider.java | 6 ++-- .../decider/ShardsLimitAllocationDecider.java | 2 +- .../SnapshotInProgressAllocationDecider.java | 2 +- .../decider/ThrottlingAllocationDecider.java | 4 +-- .../service/InternalClusterService.java | 2 +- .../common/settings/ClusterSettings.java | 2 +- .../common/settings/Setting.java | 4 +-- .../common/settings/SettingsModule.java | 4 +-- .../discovery/DiscoverySettings.java | 8 ++--- .../discovery/zen/ZenDiscovery.java | 2 +- .../zen/elect/ElectMasterService.java | 2 +- .../index/store/IndexStoreConfig.java | 4 +-- .../HierarchyCircuitBreakerService.java | 10 +++--- .../indices/recovery/RecoverySettings.java | 24 +++++++------- .../indices/ttl/IndicesTTLService.java | 2 +- .../elasticsearch/search/SearchService.java | 2 +- .../elasticsearch/threadpool/ThreadPool.java | 2 +- .../transport/TransportService.java | 4 +-- .../cluster/ClusterModuleTests.java | 2 +- .../common/settings/ScopedSettingsTests.java | 14 ++++---- .../common/settings/SettingTests.java | 32 +++++++++---------- 32 files changed, 87 insertions(+), 88 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java 
b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 57c9fdbf0da..e454fcabc7a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -45,7 +45,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER); @Inject public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, diff --git a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java index 68b775bd532..5f2fb33e043 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java +++ b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java @@ -33,7 +33,7 @@ public final class DestructiveOperations extends AbstractComponent { /** * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed. 
*/ - public static final Setting REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.Cluster); + public static final Setting REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.CLUSTER); private volatile boolean destructiveRequiresName; @Inject diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 377d6578ac1..925a5a12ed6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -64,8 +64,8 @@ import java.util.concurrent.TimeUnit; */ public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { - public static final Setting INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.Cluster); - public static final Setting INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.Cluster); + public static final Setting INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER); + public static final Setting INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER); private volatile TimeValue updateFrequency; diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 
7df0e24210e..9e57fe3a48a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -41,7 +41,7 @@ import java.util.concurrent.TimeoutException; */ public class MappingUpdatedAction extends AbstractComponent { - public static final Setting INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.Cluster); + public static final Setting INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); private IndicesAdminClient client; private volatile TimeValue dynamicMappingUpdateTimeout; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 5b8514c22bc..0bcebfbd543 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -40,7 +40,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.support.LoggerMessageFormat; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -141,7 +140,7 @@ public class MetaData implements Iterable, Diffable, Fr } - public static final Setting SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.Cluster); + public static final Setting SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.CLUSTER); public static final 
ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 1388eddf76f..da73cdc1455 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -73,9 +73,9 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; */ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { - public static final Setting INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.Cluster); - public static final Setting SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.Cluster); - public static final Setting THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.Cluster); + public static final Setting INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.CLUSTER); + public static final Setting SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.CLUSTER); + public static final Setting THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.CLUSTER); private volatile WeightFunction weightFunction; private volatile float threshold; diff --git 
a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 148fdd82f35..a66c8ddaef7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -77,8 +77,8 @@ public class AwarenessAllocationDecider extends AllocationDecider { public static final String NAME = "awareness"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster); - public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.CLUSTER); private String[] awarenessAttributes; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 036695bee46..0e5e744d274 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -48,7 +48,7 @@ import 
java.util.Locale; public class ClusterRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "cluster_rebalance"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.CLUSTER); /** * An enum representation for the configured re-balance type. diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index 7843a31ff91..21023400e32 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -42,7 +42,7 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "concurrent_rebalance"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, true, Setting.Scope.CLUSTER); private volatile int clusterConcurrentRebalance; @Inject diff --git 
a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 5ffaa1f0e84..49cf5abadd4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -81,11 +81,11 @@ public class DiskThresholdDecider extends AllocationDecider { private volatile boolean enabled; private volatile TimeValue rerouteInterval; - public static final Setting CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.Cluster); - public static final Setting CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.Cluster); - public static final Setting CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.Cluster); - public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.Cluster);; - public static final Setting CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.CLUSTER); + public static final Setting 
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.CLUSTER);; + public static final Setting CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER); /** * Listens for a node to go over the high watermark and kicks off an empty diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 61156de7137..1df362399f7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -59,10 +59,10 @@ public class EnableAllocationDecider extends AllocationDecider { public static final String NAME = "enable"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, 
Setting.Scope.CLUSTER); public static final String INDEX_ROUTING_ALLOCATION_ENABLE= "index.routing.allocation.enable"; - public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER); public static final String INDEX_ROUTING_REBALANCE_ENABLE = "index.routing.rebalance.enable"; private volatile Rebalance enableRebalance; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index 87ec158e8ca..4c451e7fffa 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -64,9 +64,9 @@ public class FilterAllocationDecider extends AllocationDecider { public static final String INDEX_ROUTING_INCLUDE_GROUP = "index.routing.allocation.include."; public static final String INDEX_ROUTING_EXCLUDE_GROUP = "index.routing.allocation.exclude."; - public static final Setting CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.Cluster); - public static final Setting CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.Cluster); - public static final Setting CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, 
Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER); private volatile DiscoveryNodeFilters clusterRequireFilters; private volatile DiscoveryNodeFilters clusterIncludeFilters; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index 3bc02879e61..9149d04cf60 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -64,7 +64,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { * Controls the maximum number of shards per node on a global level. * Negative values are interpreted as unlimited. 
*/ - public static final Setting CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, true, Setting.Scope.CLUSTER); @Inject diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index fdc65bf7ac8..597f0add8da 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -39,7 +39,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { /** * Disables relocation of shards that are currently being snapshotted. 
*/ - public static final Setting CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.CLUSTER); private volatile boolean enableRelocation = false; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index f7369f35dd9..9e3d96b4e18 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -52,8 +52,8 @@ public class ThrottlingAllocationDecider extends AllocationDecider { public static final String NAME = "throttling"; public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, true, Setting.Scope.Cluster); - public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", (s) -> s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES,Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), Integer::parseInt, true, Setting.Scope.Cluster); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = 
Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", (s) -> s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES,Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), Integer::parseInt, true, Setting.Scope.CLUSTER); private volatile int primariesInitialRecoveries; private volatile int concurrentRecoveries; diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index 945b9d1ea59..09f15994848 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -63,7 +63,7 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF */ public class InternalClusterService extends AbstractLifecycleComponent implements ClusterService { - public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.Cluster); + public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); public static final String SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL = "cluster.service.reconnect_interval"; public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java 
b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index bf9c24dad8d..602081a1d47 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -45,7 +45,7 @@ import java.util.*; public final class ClusterSettings extends AbstractScopedSettings { public ClusterSettings(Settings settings, Set> settingsSet) { - super(settings, settingsSet, Setting.Scope.Cluster); + super(settings, settingsSet, Setting.Scope.CLUSTER); } /** diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 8b3227d7b9f..3f182885151 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -145,8 +145,8 @@ public class Setting extends ToXContentToBytes { * The settings scope - settings can either be cluster settings or per index settings. 
*/ public enum Scope { - Cluster, - Index; + CLUSTER, + INDEX; } AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 647d9e02963..8bc8ce1b651 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -55,13 +55,13 @@ public class SettingsModule extends AbstractModule { public void registerSetting(Setting setting) { switch (setting.getScope()) { - case Cluster: + case CLUSTER: if (clusterDynamicSettings.containsKey(setting.getKey())) { throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); } clusterDynamicSettings.put(setting.getKey(), setting); break; - case Index: + case INDEX: throw new UnsupportedOperationException("not yet implemented"); } } diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index 26902a70e58..519a2330353 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -40,15 +40,15 @@ public class DiscoverySettings extends AbstractComponent { * sets the timeout for a complete publishing cycle, including both sending and committing. 
the master * will continute to process the next cluster state update after this time has elapsed **/ - public static final Setting PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.Cluster); + public static final Setting PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); /** * sets the timeout for receiving enough acks for a specific cluster state and committing it. failing * to receive responses within this window will cause the cluster state change to be rejected. */ - public static final Setting COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.Cluster); - public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.Cluster); - public static final Setting PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.Cluster); + public static final Setting COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.CLUSTER); + public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.CLUSTER); + public static final Setting PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.CLUSTER); public final static int NO_MASTER_BLOCK_ID = 2; diff --git 
a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 441f6403d1d..d69227d8f0b 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -75,7 +75,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; */ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery, PingContextProvider { - public final static Setting REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.Cluster); + public final static Setting REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.CLUSTER); public final static String SETTING_PING_TIMEOUT = "discovery.zen.ping_timeout"; public final static String SETTING_JOIN_TIMEOUT = "discovery.zen.join_timeout"; public final static String SETTING_JOIN_RETRY_ATTEMPTS = "discovery.zen.join_retry_attempts"; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java index 3ba338b4070..9cca1edfc5e 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java @@ -40,7 +40,7 @@ import java.util.List; */ public class ElectMasterService extends AbstractComponent { - public static final Setting DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, true, Setting.Scope.Cluster); + public static final Setting DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, true, Setting.Scope.CLUSTER); // This is the minimum version a master needs to be on, otherwise it gets 
ignored // This is based on the minimum compatible version of the current version this node is on diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java index 0588dafd310..ed561876735 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java @@ -36,11 +36,11 @@ public class IndexStoreConfig{ /** * Configures the node / cluster level throttle type. See {@link StoreRateLimiting.Type}. */ - public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, true, Setting.Scope.Cluster); + public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, true, Setting.Scope.CLUSTER); /** * Configures the node / cluster level throttle intensity. 
The default is 10240 MB */ - public static final Setting INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.Cluster); + public static final Setting INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.CLUSTER); private volatile StoreRateLimiting.Type rateLimitingType; private volatile ByteSizeValue rateLimitingThrottle; private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index fc80495bb1b..4c2a2ced09e 100644 --- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -46,14 +46,14 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { private final ConcurrentMap breakers = new ConcurrentHashMap(); - public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.total.limit", "70%", true, Setting.Scope.Cluster); + public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.total.limit", "70%", true, Setting.Scope.CLUSTER); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", true, Setting.Scope.Cluster); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, true, Setting.Scope.Cluster); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", 
true, Setting.Scope.CLUSTER); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, true, Setting.Scope.CLUSTER); public static final String FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.fielddata.type"; - public static final Setting REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.request.limit", "40%", true, Setting.Scope.Cluster); - public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, true, Setting.Scope.Cluster); + public static final Setting REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.request.limit", "40%", true, Setting.Scope.CLUSTER); + public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, true, Setting.Scope.CLUSTER); public static final String REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.request.type"; public static final String DEFAULT_BREAKER_TYPE = "memory"; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 0e72dedb3ad..c07ef57810a 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -40,37 +40,37 @@ import java.util.concurrent.TimeUnit; */ public class RecoverySettings extends AbstractComponent implements Closeable { - public static final Setting INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING = Setting.byteSizeSetting("indices.recovery.file_chunk_size", new ByteSizeValue(512, ByteSizeUnit.KB), true, Setting.Scope.Cluster); - public static final Setting INDICES_RECOVERY_TRANSLOG_OPS_SETTING = Setting.intSetting("indices.recovery.translog_ops", 1000, true, 
Setting.Scope.Cluster); - public static final Setting INDICES_RECOVERY_TRANSLOG_SIZE_SETTING = Setting.byteSizeSetting("indices.recovery.translog_size", new ByteSizeValue(512, ByteSizeUnit.KB), true, Setting.Scope.Cluster); - public static final Setting INDICES_RECOVERY_COMPRESS_SETTING = Setting.boolSetting("indices.recovery.compress", true, true, Setting.Scope.Cluster); - public static final Setting INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_streams", 3, true, Setting.Scope.Cluster); - public static final Setting INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_small_file_streams", 2, true, Setting.Scope.Cluster); - public static final Setting INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING = Setting.byteSizeSetting("indices.recovery.file_chunk_size", new ByteSizeValue(512, ByteSizeUnit.KB), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_TRANSLOG_OPS_SETTING = Setting.intSetting("indices.recovery.translog_ops", 1000, true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_TRANSLOG_SIZE_SETTING = Setting.byteSizeSetting("indices.recovery.translog_size", new ByteSizeValue(512, ByteSizeUnit.KB), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_COMPRESS_SETTING = Setting.boolSetting("indices.recovery.compress", true, true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_streams", 3, true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_small_file_streams", 2, true, 
Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), true, Setting.Scope.CLUSTER); /** * how long to wait before retrying after issues cause by cluster state syncing between nodes * i.e., local node is not yet known on remote node, remote shard not yet started etc. */ - public static final Setting INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), true, Setting.Scope.CLUSTER); /** how long to wait before retrying after network related issues */ - public static final Setting INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), true, Setting.Scope.CLUSTER); /** timeout value to use for requests made as part of the recovery process */ - public static final Setting INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), true, Setting.Scope.CLUSTER); /** * timeout value to use for requests made as part of the recovery process that are expected to take long time. 
* defaults to twice `indices.recovery.internal_action_timeout`. */ - public static final Setting INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.internal_action_long_timeout", (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), TimeValue.timeValueSeconds(0), true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.internal_action_long_timeout", (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); /** * recoveries that don't show any activity for more then this interval will be failed. * defaults to `indices.recovery.internal_action_long_timeout` */ - public static final Setting INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.recovery_activity_timeout", (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0), true, Setting.Scope.Cluster); + public static final Setting INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.recovery_activity_timeout", (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb", "SMALL_FILE_CUTOFF_BYTES").bytes(); diff --git a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java index 454dc460274..0eed82561a3 100644 --- a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java +++ b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java @@ -67,7 +67,7 @@ import java.util.concurrent.locks.ReentrantLock; */ public class 
IndicesTTLService extends AbstractLifecycleComponent { - public static final Setting INDICES_TTL_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.Cluster); + public static final Setting INDICES_TTL_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER); public static final String INDEX_TTL_DISABLE_PURGE = "index.ttl.disable_purge"; private final ClusterService clusterService; diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index a41a97e5684..c416ec469de 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -112,7 +112,7 @@ public class SearchService extends AbstractLifecycleComponent imp public static final String KEEPALIVE_INTERVAL_KEY = "search.keep_alive_interval"; public static final TimeValue NO_TIMEOUT = timeValueMillis(-1); - public static final Setting DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.Cluster); + public static final Setting DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.CLUSTER); private final ThreadPool threadPool; diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 870ae387907..935fa2231cb 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -169,7 +169,7 @@ public class ThreadPool extends AbstractComponent { } } - public static final Setting THREADPOOL_GROUP_SETTING = Setting.groupSetting("threadpool.", true, Setting.Scope.Cluster); + public static final Setting 
THREADPOOL_GROUP_SETTING = Setting.groupSetting("threadpool.", true, Setting.Scope.CLUSTER); private volatile Map executors; diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 82802e99dfa..05d5242ac82 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -87,8 +87,8 @@ public class TransportService extends AbstractLifecycleComponent TRACE_LOG_INCLUDE_SETTING = new Setting<>("transport.tracer.include", "", Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster); - public static final Setting TRACE_LOG_EXCLUDE_SETTING = new Setting<>("transport.tracer.exclude", "internal:discovery/zen/fd*," + TransportLivenessAction.NAME, Strings::splitStringByCommaToArray , true, Setting.Scope.Cluster);; + public static final Setting TRACE_LOG_INCLUDE_SETTING = new Setting<>("transport.tracer.include", "", Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER); + public static final Setting TRACE_LOG_EXCLUDE_SETTING = new Setting<>("transport.tracer.exclude", "internal:discovery/zen/fd*," + TransportLivenessAction.NAME, Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER);; private final ESLogger tracerLog; diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 2b77c3c7514..48b2591559e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -84,7 +84,7 @@ public class ClusterModuleTests extends ModuleTestCase { public void testRegisterClusterDynamicSetting() { final SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY); SettingsModule module = new SettingsModule(Settings.EMPTY, settingsFilter); - 
module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster)); + module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER)); assertInstanceBinding(module, ClusterSettings.class, service -> service.hasDynamicSetting("foo.bar")); } diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index bfc91bc92a3..9c5320b1354 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -31,8 +31,8 @@ import java.util.concurrent.atomic.AtomicInteger; public class ScopedSettingsTests extends ESTestCase { public void testAddConsumer() { - Setting testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster); - Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.Cluster); + Setting testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER); + Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.CLUSTER); AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, Collections.singleton(testSetting)); AtomicInteger consumer = new AtomicInteger(); @@ -59,8 +59,8 @@ public class ScopedSettingsTests extends ESTestCase { } public void testApply() { - Setting testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster); - Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.Cluster); + Setting testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER); + Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.CLUSTER); AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(testSetting, testSetting2))); AtomicInteger consumer = new AtomicInteger(); @@ -120,15 +120,15 @@ public class 
ScopedSettingsTests extends ESTestCase { } public void testIsDynamic(){ - ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster), Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.Cluster)))); + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER), Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.CLUSTER)))); assertFalse(settings.hasDynamicSetting("foo.bar.baz")); assertTrue(settings.hasDynamicSetting("foo.bar")); assertNotNull(settings.get("foo.bar.baz")); } public void testDiff() throws IOException { - Setting foobarbaz = Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.Cluster); - Setting foobar = Setting.intSetting("foo.bar", 1, true, Setting.Scope.Cluster); + Setting foobarbaz = Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.CLUSTER); + Setting foobar = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER); ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(foobar, foobarbaz))); Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.EMPTY); assertEquals(diff.getAsMap().size(), 1); diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 5f6f2b3615c..3f048e454a5 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -30,14 +30,14 @@ public class SettingTests extends ESTestCase { public void testGet() { - Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster); + Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); assertFalse(booleanSetting.get(Settings.EMPTY)); 
assertFalse(booleanSetting.get(Settings.builder().put("foo.bar", false).build())); assertTrue(booleanSetting.get(Settings.builder().put("foo.bar", true).build())); } public void testByteSize() { - Setting byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), true, Setting.Scope.Cluster); + Setting byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), true, Setting.Scope.CLUSTER); assertFalse(byteSizeValueSetting.isGroupSetting()); ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); assertEquals(byteSizeValue.bytes(), 1024); @@ -56,7 +56,7 @@ public class SettingTests extends ESTestCase { } public void testSimpleUpdate() { - Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster); + Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); AtomicReference atomicBoolean = new AtomicReference<>(null); ClusterSettings.SettingUpdater settingUpdater = booleanSetting.newUpdater(atomicBoolean::set, logger, Settings.EMPTY); Settings build = Settings.builder().put("foo.bar", false).build(); @@ -81,7 +81,7 @@ public class SettingTests extends ESTestCase { } public void testUpdateNotDynamic() { - Setting booleanSetting = Setting.boolSetting("foo.bar", false, false, Setting.Scope.Cluster); + Setting booleanSetting = Setting.boolSetting("foo.bar", false, false, Setting.Scope.CLUSTER); assertFalse(booleanSetting.isGroupSetting()); AtomicReference atomicBoolean = new AtomicReference<>(null); try { @@ -93,7 +93,7 @@ public class SettingTests extends ESTestCase { } public void testUpdaterIsIsolated() { - Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.Cluster); + Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); AtomicReference ab1 = new AtomicReference<>(null); AtomicReference ab2 = new AtomicReference<>(null); ClusterSettings.SettingUpdater 
settingUpdater = booleanSetting.newUpdater(ab1::set, logger, Settings.EMPTY); @@ -107,20 +107,20 @@ public class SettingTests extends ESTestCase { public void testDefault() { TimeValue defautlValue = TimeValue.timeValueMillis(randomIntBetween(0, 1000000)); - Setting setting = Setting.positiveTimeSetting("my.time.value", defautlValue, randomBoolean(), Setting.Scope.Cluster); + Setting setting = Setting.positiveTimeSetting("my.time.value", defautlValue, randomBoolean(), Setting.Scope.CLUSTER); assertFalse(setting.isGroupSetting()); String aDefault = setting.getDefault(Settings.EMPTY); assertEquals(defautlValue.millis() + "ms", aDefault); assertEquals(defautlValue.millis(), setting.get(Settings.EMPTY).millis()); - Setting secondaryDefault = new Setting<>("foo.bar", (s) -> s.get("old.foo.bar", "some_default"), (s) -> s, randomBoolean(), Setting.Scope.Cluster); + Setting secondaryDefault = new Setting<>("foo.bar", (s) -> s.get("old.foo.bar", "some_default"), (s) -> s, randomBoolean(), Setting.Scope.CLUSTER); assertEquals("some_default", secondaryDefault.get(Settings.EMPTY)); assertEquals("42", secondaryDefault.get(Settings.builder().put("old.foo.bar", 42).build())); } public void testComplexType() { AtomicReference ref = new AtomicReference<>(null); - Setting setting = new Setting<>("foo.bar", (s) -> "", (s) -> new ComplexType(s), true, Setting.Scope.Cluster); + Setting setting = new Setting<>("foo.bar", (s) -> "", (s) -> new ComplexType(s), true, Setting.Scope.CLUSTER); assertFalse(setting.isGroupSetting()); ref.set(setting.get(Settings.EMPTY)); ComplexType type = ref.get(); @@ -144,7 +144,7 @@ public class SettingTests extends ESTestCase { } public void testRollback() { - Setting integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.Cluster); + Setting integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.CLUSTER); assertFalse(integerSetting.isGroupSetting()); AtomicReference ref = new AtomicReference<>(null); 
ClusterSettings.SettingUpdater settingUpdater = integerSetting.newUpdater(ref::set, logger, Settings.EMPTY); @@ -159,15 +159,15 @@ public class SettingTests extends ESTestCase { } public void testType() { - Setting integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.Cluster); - assertEquals(integerSetting.getScope(), Setting.Scope.Cluster); - integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.Index); - assertEquals(integerSetting.getScope(), Setting.Scope.Index); + Setting integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.CLUSTER); + assertEquals(integerSetting.getScope(), Setting.Scope.CLUSTER); + integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.INDEX); + assertEquals(integerSetting.getScope(), Setting.Scope.INDEX); } public void testGroups() { AtomicReference ref = new AtomicReference<>(null); - Setting setting = Setting.groupSetting("foo.bar.", true, Setting.Scope.Cluster); + Setting setting = Setting.groupSetting("foo.bar.", true, Setting.Scope.CLUSTER); assertTrue(setting.isGroupSetting()); ClusterSettings.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY); @@ -241,8 +241,8 @@ public class SettingTests extends ESTestCase { public void testComposite() { Composite c = new Composite(); - Setting a = Setting.intSetting("foo.int.bar.a", 1, true, Setting.Scope.Cluster); - Setting b = Setting.intSetting("foo.int.bar.b", 1, true, Setting.Scope.Cluster); + Setting a = Setting.intSetting("foo.int.bar.a", 1, true, Setting.Scope.CLUSTER); + Setting b = Setting.intSetting("foo.int.bar.b", 1, true, Setting.Scope.CLUSTER); ClusterSettings.SettingUpdater settingUpdater = Setting.compoundUpdater(c::set, a, b, logger, Settings.EMPTY); assertFalse(settingUpdater.prepareApply(Settings.EMPTY)); settingUpdater.apply(); From 8e0a610c263fc49504b1c41bb230afd1043941d1 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Dec 2015 12:24:20 +0100 Subject: 
[PATCH 016/167] add simple javadoc reference to usage --- .../common/settings/Setting.java | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 3f182885151..0c0380d24e6 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -56,21 +56,21 @@ public class Setting extends ToXContentToBytes { * Returns the settings key or a prefix if this setting is a group setting * @see #isGroupSetting() */ - public String getKey() { + public final String getKey() { return key; } /** * Returns true iff this setting is dynamically updateable, otherwise false */ - public boolean isDynamic() { + public final boolean isDynamic() { return dynamic; } /** * Returns the settings scope */ - public Scope getScope() { + public final Scope getScope() { return scope; } @@ -87,14 +87,14 @@ public class Setting extends ToXContentToBytes { * Returns the default values string representation for this setting. * @param settings a settings object for settings that has a default value depending on another setting if available */ - public String getDefault(Settings settings) { + public final String getDefault(Settings settings) { return defaultValue.apply(settings); } /** * Returns true iff this setting is present in the given settings object. Otherwise false */ - public boolean exists(Settings settings) { + public final boolean exists(Settings settings) { return settings.get(key) != null; } @@ -117,7 +117,7 @@ public class Setting extends ToXContentToBytes { * Returns the raw (string) settings value. If the setting is not present in the given settings object the default value is returned * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value. 
*/ - public String getRaw(Settings settings) { + public final String getRaw(Settings settings) { return settings.get(key, defaultValue.apply(settings)); } @@ -131,7 +131,7 @@ public class Setting extends ToXContentToBytes { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("key", key); builder.field("type", scope.name()); @@ -149,7 +149,7 @@ public class Setting extends ToXContentToBytes { INDEX; } - AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings) { + final AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings) { return newUpdater(consumer, logger, settings, (s) -> {}); } @@ -161,6 +161,10 @@ public class Setting extends ToXContentToBytes { } } + /** + * this is used for settings that depend on each other... see {@link org.elasticsearch.common.settings.AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} and it's + * usage for details. + */ static AbstractScopedSettings.SettingUpdater compoundUpdater(final BiConsumer consumer, final Setting
aSettting, final Setting bSetting, ESLogger logger, Settings settings) { final AtomicReference aRef = new AtomicReference<>(); final AtomicReference bRef = new AtomicReference<>(); From ce417540c5f51361699b2cbcb81277e702efc9e6 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Dec 2015 12:24:40 +0100 Subject: [PATCH 017/167] apply review from @clintongormley --- docs/reference/cluster/update-settings.asciidoc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 2106bdf53b0..8ec58424730 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -62,6 +62,19 @@ the response for the last example will be: "persistent" : {}, "transient" : {} } +-------------------------------------------------- + +Settings can also be reset using simple wildcards. For instance to reset +all dynamic `discovery.zen` setting a prefix can be used: + +[source,js] +-------------------------------------------------- +curl -XPUT localhost:9200/_cluster/settings -d '{ + "transient" : { + "discovery.zen.*" : null + } +}' +-------------------------------------------------- Cluster wide settings can be returned using: From a49120bfc1780c8a1fab432e99ea069328b84f16 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Dec 2015 12:26:28 +0100 Subject: [PATCH 018/167] fix compilation --- .../java/org/elasticsearch/discovery/ec2/Ec2Discovery.java | 5 +++-- .../java/org/elasticsearch/discovery/gce/GceDiscovery.java | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java index 3fd3f1948e8..aa3cef01d03 100755 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java +++ 
b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java @@ -22,6 +22,7 @@ package org.elasticsearch.discovery.ec2; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -39,10 +40,10 @@ public class Ec2Discovery extends ZenDiscovery { @Inject public Ec2Discovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, ClusterSettingsService clusterSettingsService, ZenPingService pingService, + ClusterService clusterService, ClusterSettings clusterSettings, ZenPingService pingService, DiscoverySettings discoverySettings, ElectMasterService electMasterService) { - super(settings, clusterName, threadPool, transportService, clusterService, clusterSettingsService, + super(settings, clusterName, threadPool, transportService, clusterService, clusterSettings, pingService, electMasterService, discoverySettings); } } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java index c1a00979fcd..fe87b9244d4 100755 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java @@ -22,6 +22,7 @@ package org.elasticsearch.discovery.gce; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -39,10 +40,10 @@ public class GceDiscovery extends ZenDiscovery { @Inject public GceDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, ClusterSettingsService clusterSettingsService, ZenPingService pingService, + ClusterService clusterService, ClusterSettings clusterSettings, ZenPingService pingService, DiscoverySettings discoverySettings, ElectMasterService electMasterService) { - super(settings, clusterName, threadPool, transportService, clusterService, clusterSettingsService, + super(settings, clusterName, threadPool, transportService, clusterService, clusterSettings, pingService, electMasterService, discoverySettings); } } From 3b85dbb8c806ac1ac3e5e8ce5551d68acc7bce4f Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Dec 2015 13:42:35 +0100 Subject: [PATCH 019/167] use raw value directly --- .../main/java/org/elasticsearch/common/settings/Setting.java | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 0c0380d24e6..d82a8cb1cd5 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -226,10 +226,7 @@ public class Setting extends ToXContentToBytes { public boolean prepareApply(Settings settings) { - String newValue = settings.get(key); - if (newValue == null) { - newValue = getRaw(settings); - } + final String newValue = getRaw(settings); if (value.equals(newValue) == false) { T inst = get(settings); try { From ab44366d939d277043a40bad08caf34709f37502 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 9 Dec 2015 19:12:15 +0100 Subject: [PATCH 020/167] Remove back compat for the `_source` compression 
options. These options have been deprecated before 2.0 so they don't need to be supported by 3.0. --- .../mapper/internal/SourceFieldMapper.java | 85 +--------------- .../source/CompressSourceMappingTests.java | 97 ------------------- .../source/DefaultSourceMappingTests.java | 26 ----- 3 files changed, 5 insertions(+), 203 deletions(-) delete mode 100644 core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index da3b8dbc5ab..a844ab223aa 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -35,7 +34,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; @@ -93,10 +91,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { private boolean enabled = Defaults.ENABLED; - private long compressThreshold = Defaults.COMPRESS_THRESHOLD; - - private Boolean compress = null; - private String format = Defaults.FORMAT; private String[] includes = null; @@ -111,16 
+105,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { return this; } - public Builder compress(boolean compress) { - this.compress = compress; - return this; - } - - public Builder compressThreshold(long compressThreshold) { - this.compressThreshold = compressThreshold; - return this; - } - public Builder format(String format) { this.format = format; return this; @@ -138,7 +122,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { @Override public SourceFieldMapper build(BuilderContext context) { - return new SourceFieldMapper(enabled, format, compress, compressThreshold, includes, excludes, context.indexSettings()); + return new SourceFieldMapper(enabled, format, includes, excludes, context.indexSettings()); } } @@ -154,22 +138,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { if (fieldName.equals("enabled")) { builder.enabled(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if (fieldName.equals("compress") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - if (fieldNode != null) { - builder.compress(nodeBooleanValue(fieldNode)); - } - iterator.remove(); - } else if (fieldName.equals("compress_threshold") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - if (fieldNode != null) { - if (fieldNode instanceof Number) { - builder.compressThreshold(((Number) fieldNode).longValue()); - builder.compress(true); - } else { - builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString(), "compress_threshold").bytes()); - builder.compress(true); - } - } - iterator.remove(); } else if ("format".equals(fieldName)) { builder.format(nodeStringValue(fieldNode, null)); iterator.remove(); @@ -242,9 +210,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { /** indicates whether the source will always exist and be complete, for use by features like the update API */ private final boolean complete; - private Boolean compress; - private long 
compressThreshold; - private final String[] includes; private final String[] excludes; @@ -253,15 +218,13 @@ public class SourceFieldMapper extends MetadataFieldMapper { private XContentType formatContentType; private SourceFieldMapper(Settings indexSettings) { - this(Defaults.ENABLED, Defaults.FORMAT, null, -1, null, null, indexSettings); + this(Defaults.ENABLED, Defaults.FORMAT, null, null, indexSettings); } - private SourceFieldMapper(boolean enabled, String format, Boolean compress, long compressThreshold, + private SourceFieldMapper(boolean enabled, String format, String[] includes, String[] excludes, Settings indexSettings) { super(NAME, Defaults.FIELD_TYPE.clone(), Defaults.FIELD_TYPE, indexSettings); // Only stored. this.enabled = enabled; - this.compress = compress; - this.compressThreshold = compressThreshold; this.includes = includes; this.excludes = excludes; this.format = format; @@ -321,35 +284,14 @@ public class SourceFieldMapper extends MetadataFieldMapper { Tuple> mapTuple = XContentHelper.convertToMap(source, true); Map filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes); BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput streamOutput = bStream; - if (compress != null && compress && (compressThreshold == -1 || source.length() > compressThreshold)) { - streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - } XContentType contentType = formatContentType; if (contentType == null) { contentType = mapTuple.v1(); } - XContentBuilder builder = XContentFactory.contentBuilder(contentType, streamOutput).map(filteredSource); + XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource); builder.close(); source = bStream.bytes(); - } else if (compress != null && compress && !CompressorFactory.isCompressed(source)) { - if (compressThreshold == -1 || source.length() > compressThreshold) { - BytesStreamOutput bStream = new BytesStreamOutput(); - XContentType 
contentType = XContentFactory.xContentType(source); - if (formatContentType != null && formatContentType != contentType) { - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, CompressorFactory.defaultCompressor().streamOutput(bStream)); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source)); - builder.close(); - } else { - StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - source.writeTo(streamOutput); - streamOutput.close(); - } - source = bStream.bytes(); - // update the data in the context, so it can be compressed and stored compressed outside... - context.source(source); - } } else if (formatContentType != null) { // see if we need to convert the content type Compressor compressor = CompressorFactory.compressor(source); @@ -403,7 +345,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { boolean includeDefaults = params.paramAsBoolean("include_defaults", false); // all are defaults, no need to write it at all - if (!includeDefaults && enabled == Defaults.ENABLED && compress == null && compressThreshold == -1 && includes == null && excludes == null) { + if (!includeDefaults && enabled == Defaults.ENABLED && includes == null && excludes == null) { return builder; } builder.startObject(contentType()); @@ -413,16 +355,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { if (includeDefaults || !Objects.equals(format, Defaults.FORMAT)) { builder.field("format", format); } - if (compress != null) { - builder.field("compress", compress); - } else if (includeDefaults) { - builder.field("compress", false); - } - if (compressThreshold != -1) { - builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString()); - } else if (includeDefaults) { - builder.field("compress_threshold", -1); - } if (includes != null) { builder.field("includes", includes); @@ -453,13 +385,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { 
if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) { mergeResult.addConflict("Cannot update excludes setting for [_source]"); } - } else { - if (sourceMergeWith.compress != null) { - this.compress = sourceMergeWith.compress; - } - if (sourceMergeWith.compressThreshold != -1) { - this.compressThreshold = sourceMergeWith.compressThreshold; - } } } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java deleted file mode 100644 index 7c1875be550..00000000000 --- a/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.mapper.source; - -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import static org.hamcrest.Matchers.equalTo; - -/** - * - */ -public class CompressSourceMappingTests extends ESSingleNodeTestCase { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); - - public void testCompressDisabled() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress", false).endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2") - .endObject().bytes()); - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false)); - } - - public void testCompressEnabled() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress", true).endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = documentMapper.parse("test", "type", "1", 
XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2") - .endObject().bytes()); - - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true)); - } - - public void testCompressThreshold() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress_threshold", "200b").endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .endObject().bytes()); - - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false)); - - doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .endObject().bytes()); - - bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true)); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java index 4ec0ff5211e..bbc1847dc08 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java @@ -84,32 +84,6 @@ public class DefaultSourceMappingTests extends ESSingleNodeTestCase { assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON)); } - public void testJsonFormatCompressedBackcompat() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("format", "json").field("compress", true).endObject() - .endObject().endObject().string(); - - Settings backcompatSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); - DocumentMapperParser parser = createIndex("test", backcompatSettings).mapperService().documentMapperParser(); - DocumentMapper documentMapper = parser.parse(mapping); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true)); - byte[] uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes(); - assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON)); - - documentMapper = parser.parse(mapping); - doc = documentMapper.parse("test", "type", "1", XContentFactory.smileBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true)); - uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes(); - assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON)); - } - public void testIncludes() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_source").field("includes", new String[]{"path1*"}).endObject() From 437488ae64f863daeda4ee5c9b1a53123eceb9be Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 10 Dec 
2015 12:55:05 +0100 Subject: [PATCH 021/167] Remove the MissingQueryBuilder which was deprecated in 2.2.0. As a replacement use ExistsQueryBuilder inside a mustNot() clause. So instead of using `new ExistsQueryBuilder(name)` now use: `new BoolQueryBuilder().mustNot(new ExistsQueryBuilder(name))`. Closes #14112 --- .../classic/MapperQueryParser.java | 1 - .../classic/MissingFieldQueryExtension.java | 42 ---- .../index/query/MissingQueryBuilder.java | 234 ------------------ .../index/query/MissingQueryParser.java | 88 ------- .../index/query/QueryBuilders.java | 21 -- .../elasticsearch/indices/IndicesModule.java | 1 - .../elasticsearch/aliases/IndexAliasesIT.java | 2 +- .../BasicBackwardsCompatibilityIT.java | 17 -- .../OldIndexBackwardsCompatibilityIT.java | 7 - .../index/query/MissingQueryBuilderTests.java | 107 -------- .../query/QueryDSLDocumentationTests.java | 5 - .../template/SimpleIndexTemplateIT.java | 2 +- .../search/highlight/HighlighterSearchIT.java | 8 +- .../{ExistsMissingIT.java => ExistsIT.java} | 122 +++------ .../search/query/SearchQueryIT.java | 27 -- .../java-api/query-dsl/missing-query.asciidoc | 14 -- docs/reference/migration/migrate_3_0.asciidoc | 7 +- .../reference/query-dsl/exists-query.asciidoc | 20 +- .../query-dsl/missing-query.asciidoc | 132 ---------- 19 files changed, 63 insertions(+), 794 deletions(-) delete mode 100644 core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java delete mode 100644 core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java delete mode 100644 core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java delete mode 100644 core/src/test/java/org/elasticsearch/index/query/MissingQueryBuilderTests.java rename core/src/test/java/org/elasticsearch/search/query/{ExistsMissingIT.java => ExistsIT.java} (54%) delete mode 100644 docs/java-api/query-dsl/missing-query.asciidoc delete mode 100644 docs/reference/query-dsl/missing-query.asciidoc diff --git 
a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index fce58d2f88f..9f2b1b66221 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -54,7 +54,6 @@ public class MapperQueryParser extends QueryParser { static { Map fieldQueryExtensions = new HashMap<>(); fieldQueryExtensions.put(ExistsFieldQueryExtension.NAME, new ExistsFieldQueryExtension()); - fieldQueryExtensions.put(MissingFieldQueryExtension.NAME, new MissingFieldQueryExtension()); FIELD_QUERY_EXTENSIONS = unmodifiableMap(fieldQueryExtensions); } diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java deleted file mode 100644 index f9fc8c9d5dc..00000000000 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.lucene.queryparser.classic; - -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.index.query.MissingQueryBuilder; -import org.elasticsearch.index.query.QueryShardContext; - -/** - * - */ -public class MissingFieldQueryExtension implements FieldQueryExtension { - - public static final String NAME = "_missing_"; - - @Override - public Query query(QueryShardContext context, String queryText) { - Query query = MissingQueryBuilder.newFilter(context, queryText, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE, MissingQueryBuilder.DEFAULT_NULL_VALUE); - if (query != null) { - return new ConstantScoreQuery(query); - } - return null; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java deleted file mode 100644 index 70d0bb9350f..00000000000 --- a/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.query; - -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermRangeQuery; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper; -import org.elasticsearch.index.mapper.object.ObjectMapper; - -import java.io.IOException; -import java.util.Collection; -import java.util.Objects; - -/** - * Constructs a filter that have only null values or no value in the original field. - */ -public class MissingQueryBuilder extends AbstractQueryBuilder { - - public static final String NAME = "missing"; - - public static final boolean DEFAULT_NULL_VALUE = false; - - public static final boolean DEFAULT_EXISTENCE_VALUE = true; - - private final String fieldPattern; - - private final boolean nullValue; - - private final boolean existence; - - static final MissingQueryBuilder PROTOTYPE = new MissingQueryBuilder("field", DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE); - - /** - * Constructs a filter that returns documents with only null values or no value in the original field. - * @param fieldPattern the field to query - * @param nullValue should the missing filter automatically include fields with null value configured in the - * mappings. Defaults to false. - * @param existence should the missing filter include documents where the field doesn't exist in the docs. - * Defaults to true. 
- * @throws IllegalArgumentException when both existence and nullValue are set to false - */ - public MissingQueryBuilder(String fieldPattern, boolean nullValue, boolean existence) { - if (Strings.isEmpty(fieldPattern)) { - throw new IllegalArgumentException("missing query must be provided with a [field]"); - } - if (nullValue == false && existence == false) { - throw new IllegalArgumentException("missing query must have either 'existence', or 'null_value', or both set to true"); - } - this.fieldPattern = fieldPattern; - this.nullValue = nullValue; - this.existence = existence; - } - - public MissingQueryBuilder(String fieldPattern) { - this(fieldPattern, DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE); - } - - public String fieldPattern() { - return this.fieldPattern; - } - - /** - * Returns true if the missing filter will include documents where the field contains a null value, otherwise - * these documents will not be included. - */ - public boolean nullValue() { - return this.nullValue; - } - - /** - * Returns true if the missing filter will include documents where the field has no values, otherwise - * these documents will not be included. 
- */ - public boolean existence() { - return this.existence; - } - - @Override - protected void doXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(NAME); - builder.field(MissingQueryParser.FIELD_FIELD.getPreferredName(), fieldPattern); - builder.field(MissingQueryParser.NULL_VALUE_FIELD.getPreferredName(), nullValue); - builder.field(MissingQueryParser.EXISTENCE_FIELD.getPreferredName(), existence); - printBoostAndQueryName(builder); - builder.endObject(); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - protected Query doToQuery(QueryShardContext context) throws IOException { - return newFilter(context, fieldPattern, existence, nullValue); - } - - public static Query newFilter(QueryShardContext context, String fieldPattern, boolean existence, boolean nullValue) { - if (!existence && !nullValue) { - throw new QueryShardException(context, "missing must have either existence, or null_value, or both set to true"); - } - - final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context.getMapperService().fullName(FieldNamesFieldMapper.NAME); - if (fieldNamesFieldType == null) { - // can only happen when no types exist, so no docs exist either - return Queries.newMatchNoDocsQuery(); - } - - ObjectMapper objectMapper = context.getObjectMapper(fieldPattern); - if (objectMapper != null) { - // automatic make the object mapper pattern - fieldPattern = fieldPattern + ".*"; - } - - Collection fields = context.simpleMatchToIndexNames(fieldPattern); - if (fields.isEmpty()) { - if (existence) { - // if we ask for existence of fields, and we found none, then we should match on all - return Queries.newMatchAllQuery(); - } - return null; - } - - Query existenceFilter = null; - Query nullFilter = null; - - if (existence) { - BooleanQuery.Builder boolFilter = new BooleanQuery.Builder(); - for (String field : fields) { - MappedFieldType 
fieldType = context.fieldMapper(field); - Query filter = null; - if (fieldNamesFieldType.isEnabled()) { - final String f; - if (fieldType != null) { - f = fieldType.names().indexName(); - } else { - f = field; - } - filter = fieldNamesFieldType.termQuery(f, context); - } - // if _field_names are not indexed, we need to go the slow way - if (filter == null && fieldType != null) { - filter = fieldType.rangeQuery(null, null, true, true); - } - if (filter == null) { - filter = new TermRangeQuery(field, null, null, true, true); - } - boolFilter.add(filter, BooleanClause.Occur.SHOULD); - } - - existenceFilter = boolFilter.build(); - existenceFilter = Queries.not(existenceFilter);; - } - - if (nullValue) { - for (String field : fields) { - MappedFieldType fieldType = context.fieldMapper(field); - if (fieldType != null) { - nullFilter = fieldType.nullValueQuery(); - } - } - } - - Query filter; - if (nullFilter != null) { - if (existenceFilter != null) { - filter = new BooleanQuery.Builder() - .add(existenceFilter, BooleanClause.Occur.SHOULD) - .add(nullFilter, BooleanClause.Occur.SHOULD) - .build(); - } else { - filter = nullFilter; - } - } else { - filter = existenceFilter; - } - - if (filter == null) { - return null; - } - - return new ConstantScoreQuery(filter); - } - - @Override - protected MissingQueryBuilder doReadFrom(StreamInput in) throws IOException { - return new MissingQueryBuilder(in.readString(), in.readBoolean(), in.readBoolean()); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - out.writeString(fieldPattern); - out.writeBoolean(nullValue); - out.writeBoolean(existence); - } - - @Override - protected int doHashCode() { - return Objects.hash(fieldPattern, nullValue, existence); - } - - @Override - protected boolean doEquals(MissingQueryBuilder other) { - return Objects.equals(fieldPattern, other.fieldPattern) && - Objects.equals(nullValue, other.nullValue) && - Objects.equals(existence, other.existence); - } -} diff --git 
a/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java deleted file mode 100644 index 467971b65ca..00000000000 --- a/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.query; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; - -/** - * Parser for missing query - */ -public class MissingQueryParser implements QueryParser { - - public static final ParseField FIELD_FIELD = new ParseField("field"); - public static final ParseField NULL_VALUE_FIELD = new ParseField("null_value"); - public static final ParseField EXISTENCE_FIELD = new ParseField("existence"); - - @Override - public String[] names() { - return new String[]{MissingQueryBuilder.NAME}; - } - - @Override - public MissingQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { - XContentParser parser = parseContext.parser(); - - String fieldPattern = null; - String queryName = null; - float boost = AbstractQueryBuilder.DEFAULT_BOOST; - boolean nullValue = MissingQueryBuilder.DEFAULT_NULL_VALUE; - boolean existence = MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE; - - XContentParser.Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if (parseContext.parseFieldMatcher().match(currentFieldName, FIELD_FIELD)) { - fieldPattern = parser.text(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, NULL_VALUE_FIELD)) { - nullValue = parser.booleanValue(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, EXISTENCE_FIELD)) { - existence = parser.booleanValue(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { - queryName = parser.text(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { - boost = parser.floatValue(); - } else { - throw new 
ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]"); - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); - } - } - - if (fieldPattern == null) { - throw new ParsingException(parser.getTokenLocation(), "missing must be provided with a [field]"); - } - return new MissingQueryBuilder(fieldPattern, nullValue, existence) - .boost(boost) - .queryName(queryName); - } - - @Override - public MissingQueryBuilder getBuilderPrototype() { - return MissingQueryBuilder.PROTOTYPE; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 45f97d68c3f..3fb09679204 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -810,27 +810,6 @@ public abstract class QueryBuilders { return new ExistsQueryBuilder(name); } - /** - * A filter to filter only documents where a field does not exists in them. - * @param name the field to query - */ - public static MissingQueryBuilder missingQuery(String name) { - return missingQuery(name, MissingQueryBuilder.DEFAULT_NULL_VALUE, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE); - } - - /** - * A filter to filter only documents where a field does not exists in them. - * @param name the field to query - * @param nullValue should the missing filter automatically include fields with null value configured in the - * mappings. Defaults to false. - * @param existence should the missing filter include documents where the field doesn't exist in the docs. - * Defaults to true. 
- * @throws IllegalArgumentException when both existence and nullValue are set to false - */ - public static MissingQueryBuilder missingQuery(String name, boolean nullValue, boolean existence) { - return new MissingQueryBuilder(name, nullValue, existence); - } - private QueryBuilders() { } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index cdd7f050331..6878002c015 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -120,7 +120,6 @@ public class IndicesModule extends AbstractModule { registerQueryParser(GeohashCellQuery.Parser.class); registerQueryParser(GeoPolygonQueryParser.class); registerQueryParser(ExistsQueryParser.class); - registerQueryParser(MissingQueryParser.class); registerQueryParser(MatchNoneQueryParser.class); if (ShapesAvailability.JTS_AVAILABLE) { diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index 81b09c8c8e2..76759394d31 100644 --- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -865,7 +865,7 @@ public class IndexAliasesIT extends ESIntegTestCase { assertAcked(prepareCreate("test") .addMapping("type", "field", "type=string") .addAlias(new Alias("alias1")) - .addAlias(new Alias("alias2").filter(QueryBuilders.missingQuery("field"))) + .addAlias(new Alias("alias2").filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("field")))) .addAlias(new Alias("alias3").indexRouting("index").searchRouting("search"))); checkAliases(); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java index fbc6cc58067..95735b8648f 100644 --- 
a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java @@ -71,7 +71,6 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -440,25 +439,9 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase { countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("obj1")).get(); assertHitCount(countResponse, 2l); - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("field1")).get(); - assertHitCount(countResponse, 2l); - - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("field1")).get(); - assertHitCount(countResponse, 2l); - - countResponse = client().prepareSearch().setSize(0).setQuery(constantScoreQuery(missingQuery("field1"))).get(); - assertHitCount(countResponse, 2l); - countResponse = client().prepareSearch().setSize(0).setQuery(queryStringQuery("_missing_:field1")).get(); assertHitCount(countResponse, 2l); - // wildcard check - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("x*")).get(); - assertHitCount(countResponse, 2l); - - // object check - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("obj1")).get(); - assertHitCount(countResponse, 2l); if (!backwardsCluster().upgradeOneNode()) { break; } diff --git 
a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 137c6c5b2c2..eb0d17d22c9 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -341,13 +341,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { searchRsp = searchReq.get(); ElasticsearchAssertions.assertNoFailures(searchRsp); assertEquals(numDocs, searchRsp.getHits().getTotalHits()); - - logger.info("--> testing missing filter"); - // the field for the missing filter here needs to be different than the exists filter above, to avoid being found in the cache - searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.missingQuery("long_sort")); - searchRsp = searchReq.get(); - ElasticsearchAssertions.assertNoFailures(searchRsp); - assertEquals(0, searchRsp.getHits().getTotalHits()); } void assertBasicAggregationWorks(String indexName) { diff --git a/core/src/test/java/org/elasticsearch/index/query/MissingQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MissingQueryBuilderTests.java deleted file mode 100644 index 253509b2fc6..00000000000 --- a/core/src/test/java/org/elasticsearch/index/query/MissingQueryBuilderTests.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.apache.lucene.search.Query; - -import java.io.IOException; - -import static org.hamcrest.Matchers.containsString; - -public class MissingQueryBuilderTests extends AbstractQueryTestCase { - - @Override - protected MissingQueryBuilder doCreateTestQueryBuilder() { - String fieldName = randomBoolean() ? randomFrom(MAPPED_FIELD_NAMES) : randomAsciiOfLengthBetween(1, 10); - Boolean existence = randomBoolean(); - Boolean nullValue = randomBoolean(); - if (existence == false && nullValue == false) { - if (randomBoolean()) { - existence = true; - } else { - nullValue = true; - } - } - return new MissingQueryBuilder(fieldName, nullValue, existence); - } - - @Override - protected void doAssertLuceneQuery(MissingQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { - // too many mapping dependent cases to test, we don't want to end up - // duplication the toQuery method - } - - public void testIllegalArguments() { - try { - if (randomBoolean()) { - new MissingQueryBuilder("", true, true); - } else { - new MissingQueryBuilder(null, true, true); - } - fail("must not be null or empty"); - } catch (IllegalArgumentException e) { - // expected - } - - try { - new MissingQueryBuilder("fieldname", false, false); - fail("existence and nullValue cannot both be false"); - } catch (IllegalArgumentException e) { - // expected - } - - try { - new MissingQueryBuilder("fieldname", MissingQueryBuilder.DEFAULT_NULL_VALUE, false); - fail("existence and nullValue cannot both be 
false"); - } catch (IllegalArgumentException e) { - // expected - } - } - - public void testBothNullValueAndExistenceFalse() throws IOException { - QueryShardContext context = createShardContext(); - context.setAllowUnmappedFields(true); - try { - MissingQueryBuilder.newFilter(context, "field", false, false); - fail("Expected QueryShardException"); - } catch (QueryShardException e) { - assertThat(e.getMessage(), containsString("missing must have either existence, or null_value")); - } - } - - public void testFromJson() throws IOException { - String json = - "{\n" + - " \"missing\" : {\n" + - " \"field\" : \"user\",\n" + - " \"null_value\" : false,\n" + - " \"existence\" : true,\n" + - " \"boost\" : 1.0\n" + - " }\n" + - "}"; - - MissingQueryBuilder parsed = (MissingQueryBuilder) parseQuery(json); - checkGeneratedJson(json, parsed); - - assertEquals(json, false, parsed.nullValue()); - assertEquals(json, true, parsed.existence()); - assertEquals(json, "user", parsed.fieldPattern()); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java index 7ba1d11815a..028987448f0 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java @@ -58,7 +58,6 @@ import static org.elasticsearch.index.query.QueryBuilders.idsQuery; import static org.elasticsearch.index.query.QueryBuilders.indicesQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisQuery; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.elasticsearch.index.query.QueryBuilders.nestedQuery; @@ -240,10 
+239,6 @@ public class QueryDSLDocumentationTests extends ESTestCase { matchQuery("name", "kimchy elasticsearch"); } - public void testMissing() { - missingQuery("user", true, true); - } - public void testMLT() { String[] fields = {"name.first", "name.last"}; String[] texts = {"text like this one"}; diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index ca2025ced1b..b32cfef76b6 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -560,7 +560,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .setOrder(0) .addAlias(new Alias("alias1")) .addAlias(new Alias("{index}-alias")) - .addAlias(new Alias("alias3").filter(QueryBuilders.missingQuery("test"))) + .addAlias(new Alias("alias3").filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("test")))) .addAlias(new Alias("alias4")).get(); client().admin().indices().preparePutTemplate("template2") diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java index 8a2506ec3ad..310ffff3007 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java @@ -58,7 +58,6 @@ import static org.elasticsearch.index.query.QueryBuilders.fuzzyQuery; import static org.elasticsearch.index.query.QueryBuilders.matchPhrasePrefixQuery; import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static 
org.elasticsearch.index.query.QueryBuilders.prefixQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; @@ -67,6 +66,7 @@ import static org.elasticsearch.index.query.QueryBuilders.regexpQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.QueryBuilders.typeQuery; import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; +import static org.elasticsearch.index.query.QueryBuilders.existsQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -2471,7 +2471,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(boolQuery() - .should(constantScoreQuery(QueryBuilders.missingQuery("field1"))) + .should(boolQuery().mustNot(constantScoreQuery(QueryBuilders.existsQuery("field1")))) .should(matchQuery("field1", "test")) .should(constantScoreQuery(queryStringQuery("field1:photo*")))) .highlighter(highlight().field("field1")); @@ -2501,7 +2501,9 @@ public class HighlighterSearchIT extends ESIntegTestCase { refresh(); logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource().query(boolQuery().must(queryStringQuery("field1:photo*")).filter(missingQuery("field_null"))) + SearchSourceBuilder source = searchSource().query(boolQuery() + .must(queryStringQuery("field1:photo*")) + .mustNot(existsQuery("field_null"))) .highlighter(highlight().field("field1")); SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The photography word will get highlighted")); diff --git 
a/core/src/test/java/org/elasticsearch/search/query/ExistsMissingIT.java b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java similarity index 54% rename from core/src/test/java/org/elasticsearch/search/query/ExistsMissingIT.java rename to core/src/test/java/org/elasticsearch/search/query/ExistsIT.java index 349197d5f48..ae1f7ac5aa4 100644 --- a/core/src/test/java/org/elasticsearch/search/query/ExistsMissingIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java @@ -43,7 +43,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -public class ExistsMissingIT extends ESIntegTestCase { +public class ExistsIT extends ESIntegTestCase { // TODO: move this to a unit test somewhere... public void testEmptyIndex() throws Exception { @@ -51,50 +51,50 @@ public class ExistsMissingIT extends ESIntegTestCase { ensureYellow("test"); SearchResponse resp = client().prepareSearch("test").setQuery(QueryBuilders.existsQuery("foo")).execute().actionGet(); assertSearchResponse(resp); - resp = client().prepareSearch("test").setQuery(QueryBuilders.missingQuery("foo")).execute().actionGet(); + resp = client().prepareSearch("test").setQuery(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("foo"))).execute().actionGet(); assertSearchResponse(resp); } - public void testExistsMissing() throws Exception { + public void testExists() throws Exception { XContentBuilder mapping = XContentBuilder.builder(JsonXContent.jsonXContent) - .startObject() + .startObject() .startObject("type") - .startObject(FieldNamesFieldMapper.NAME) - .field("enabled", randomBoolean()) - .endObject() - .startObject("properties") - .startObject("foo") - .field("type", "string") - .endObject() - .startObject("bar") - .field("type", "object") - .startObject("properties") 
- .startObject("foo") - .field("type", "string") - .endObject() - .startObject("bar") - .field("type", "object") - .startObject("properties") - .startObject("bar") - .field("type", "string") - .endObject() - .endObject() - .endObject() - .startObject("baz") - .field("type", "long") - .endObject() - .endObject() - .endObject() - .endObject() + .startObject(FieldNamesFieldMapper.NAME) + .field("enabled", randomBoolean()) .endObject() - .endObject(); + .startObject("properties") + .startObject("foo") + .field("type", "string") + .endObject() + .startObject("bar") + .field("type", "object") + .startObject("properties") + .startObject("foo") + .field("type", "string") + .endObject() + .startObject("bar") + .field("type", "object") + .startObject("properties") + .startObject("bar") + .field("type", "string") + .endObject() + .endObject() + .endObject() + .startObject("baz") + .field("type", "long") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", mapping)); @SuppressWarnings("unchecked") Map barObject = new HashMap<>(); barObject.put("foo", "bar"); barObject.put("bar", singletonMap("bar", "foo")); - final Map[] sources = new Map[] { + final Map[] sources = new Map[]{ // simple property singletonMap("foo", "bar"), // object fields @@ -145,62 +145,6 @@ public class ExistsMissingIT extends ESIntegTestCase { } throw e; } - - // missing - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery(fieldName)).execute().actionGet(); - assertSearchResponse(resp); - assertEquals(String.format(Locale.ROOT, "missing(%s, %d) mapping: %s response: %s", fieldName, count, mapping.string(), resp), numDocs - count, resp.getHits().totalHits()); } } - - public void testNullValueUnset() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed")); - indexRandom(true, - 
client().prepareIndex("idx", "type", "1").setSource("f", "foo"), - client().prepareIndex("idx", "type", "2").setSource("f", null), - client().prepareIndex("idx", "type", "3").setSource("g", "bar"), - client().prepareIndex("idx", "type", "4").setSource("f", "bar")); - - SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, true)).get(); - assertSearchHits(resp, "2", "3"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, true)).get(); - assertSearchHits(resp, "2", "3"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, false)).get(); - assertSearchHits(resp); - - try { - client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, false)).get(); - fail("both existence and null_value can't be false"); - } catch (IllegalArgumentException e) { - // expected - } - } - - public void testNullValueSet() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed,null_value=bar")); - indexRandom(true, - client().prepareIndex("idx", "type", "1").setSource("f", "foo"), - client().prepareIndex("idx", "type", "2").setSource("f", null), - client().prepareIndex("idx", "type", "3").setSource("g", "bar"), - client().prepareIndex("idx", "type", "4").setSource("f", "bar")); - - SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, true)).get(); - assertSearchHits(resp, "2", "3", "4"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, true)).get(); - assertSearchHits(resp, "3"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, false)).get(); - assertSearchHits(resp, "2", "4"); - - try { - client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, false)).get(); - fail("both existence and null_value can't be false"); - } catch 
(IllegalArgumentException e) { - // expected - } - } - } diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 61a237c1a93..9918d449657 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -70,7 +70,6 @@ import static org.elasticsearch.index.query.QueryBuilders.idsQuery; import static org.elasticsearch.index.query.QueryBuilders.indicesQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.elasticsearch.index.query.QueryBuilders.prefixQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; @@ -805,32 +804,6 @@ public class SearchQueryIT extends ESIntegTestCase { searchResponse = client().prepareSearch().setQuery(existsQuery("obj1")).get(); assertHitCount(searchResponse, 2l); assertSearchHits(searchResponse, "1", "2"); - - searchResponse = client().prepareSearch().setQuery(missingQuery("field1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - searchResponse = client().prepareSearch().setQuery(missingQuery("field1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - searchResponse = client().prepareSearch().setQuery(constantScoreQuery(missingQuery("field1"))).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - searchResponse = client().prepareSearch().setQuery(queryStringQuery("_missing_:field1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - // wildcard check - searchResponse = 
client().prepareSearch().setQuery(missingQuery("x*")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - // object check - searchResponse = client().prepareSearch().setQuery(missingQuery("obj1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); } public void testPassQueryOrFilterAsJSONString() throws Exception { diff --git a/docs/java-api/query-dsl/missing-query.asciidoc b/docs/java-api/query-dsl/missing-query.asciidoc deleted file mode 100644 index 00086cf737b..00000000000 --- a/docs/java-api/query-dsl/missing-query.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -[[java-query-dsl-missing-query]] -==== Missing Query - -See {ref}/query-dsl-missing-query.html[Missing Query] - -[source,java] --------------------------------------------------- -QueryBuilder qb = missingQuery("user", <1> - true, <2> - true); <3> --------------------------------------------------- -<1> field -<2> find missing field with an explicit `null` value -<3> find missing field that doesn’t exist diff --git a/docs/reference/migration/migrate_3_0.asciidoc b/docs/reference/migration/migrate_3_0.asciidoc index b8683bc6fd0..c649c679352 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -411,9 +411,9 @@ Use the `field(String, float)` method instead. ==== MissingQueryBuilder -The two individual setters for existence() and nullValue() were removed in favour of -optional constructor settings in order to better capture and validate their interdependent -settings at construction time. +The MissingQueryBuilder which was deprecated in 2.2.0 is removed. As a replacement use ExistsQueryBuilder +inside a mustNot() clause. So instead of using `new ExistsQueryBuilder(name)` now use +`new BoolQueryBuilder().mustNot(new ExistsQueryBuilder(name))`. ==== NotQueryBuilder @@ -491,3 +491,4 @@ from `OsStats.Cpu#getPercent`. 
=== Fields option Only stored fields are retrievable with this option. The fields option won't be able to load non stored fields from _source anymore. + diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index 0ae3bf28ffa..404dce4a4ae 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -38,7 +38,7 @@ These documents would *not* match the above query: <3> The `user` field is missing completely. [float] -===== `null_value` mapping +==== `null_value` mapping If the field mapping includes the <> setting then explicit `null` values are replaced with the specified `null_value`. For @@ -70,3 +70,21 @@ no values in the `user` field and thus would not match the `exists` filter: { "foo": "bar" } -------------------------------------------------- +==== `missing` query + +'missing' query has been removed because it can be advantageously replaced by an `exists` query inside a must_not +clause as follows: + +[source,js] +-------------------------------------------------- +"bool": { + "must_not": { + "exists": { + "field": "user" + } + } +} +-------------------------------------------------- + +This query returns documents that have no value in the user field. 
+ diff --git a/docs/reference/query-dsl/missing-query.asciidoc b/docs/reference/query-dsl/missing-query.asciidoc deleted file mode 100644 index 648da068189..00000000000 --- a/docs/reference/query-dsl/missing-query.asciidoc +++ /dev/null @@ -1,132 +0,0 @@ -[[query-dsl-missing-query]] -=== Missing Query - -Returns documents that have only `null` values or no value in the original field: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "missing" : { "field" : "user" } - } - } -} --------------------------------------------------- - -For instance, the following docs would match the above filter: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [] } <1> -{ "user": [null] } <2> -{ "foo": "bar" } <3> --------------------------------------------------- -<1> This field has no values. -<2> This field has no non-`null` values. -<3> The `user` field is missing completely. - -These documents would *not* match the above filter: - -[source,js] --------------------------------------------------- -{ "user": "jane" } -{ "user": "" } <1> -{ "user": "-" } <2> -{ "user": ["jane"] } -{ "user": ["jane", null ] } <3> --------------------------------------------------- -<1> An empty string is a non-`null` value. -<2> Even though the `standard` analyzer would emit zero tokens, the original field is non-`null`. -<3> This field has one non-`null` value. - -[float] -==== `null_value` mapping - -If the field mapping includes a <> then explicit `null` values -are replaced with the specified `null_value`. 
For instance, if the `user` field were mapped -as follows: - -[source,js] --------------------------------------------------- - "user": { - "type": "string", - "null_value": "_null_" - } --------------------------------------------------- - -then explicit `null` values would be indexed as the string `_null_`, and the -the following docs would *not* match the `missing` filter: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [null] } --------------------------------------------------- - -However, these docs--without explicit `null` values--would still have -no values in the `user` field and thus would match the `missing` filter: - -[source,js] --------------------------------------------------- -{ "user": [] } -{ "foo": "bar" } --------------------------------------------------- - -[float] -===== `existence` and `null_value` parameters - -When the field being queried has a `null_value` mapping, then the behaviour of -the `missing` filter can be altered with the `existence` and `null_value` -parameters: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "missing" : { - "field" : "user", - "existence" : true, - "null_value" : false - } - } - } -} --------------------------------------------------- - - -`existence`:: -+ --- -When the `existence` parameter is set to `true` (the default), the missing -filter will include documents where the field has *no* values, ie: - -[source,js] --------------------------------------------------- -{ "user": [] } -{ "foo": "bar" } --------------------------------------------------- - -When set to `false`, these documents will not be included. 
--- - -`null_value`:: -+ --- -When the `null_value` parameter is set to `true`, the missing -filter will include documents where the field contains a `null` value, ie: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [null] } -{ "user": ["jane",null] } <1> --------------------------------------------------- -<1> Matches because the field contains a `null` value, even though it also contains a non-`null` value. - -When set to `false` (the default), these documents will not be included. --- - -NOTE: Either `existence` or `null_value` or both must be set to `true`. From 176a4fc8dabfc3868597e0040e93beca35b53db7 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 10 Dec 2015 14:19:19 +0100 Subject: [PATCH 022/167] Review ++ --- .../search/highlight/HighlighterSearchIT.java | 2 +- .../elasticsearch/search/query/ExistsIT.java | 60 +++++++++---------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java index 310ffff3007..63378baa721 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java @@ -2471,7 +2471,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(boolQuery() - .should(boolQuery().mustNot(constantScoreQuery(QueryBuilders.existsQuery("field1")))) + .should(boolQuery().mustNot(QueryBuilders.existsQuery("field1"))) .should(matchQuery("field1", "test")) .should(constantScoreQuery(queryStringQuery("field1:photo*")))) .highlighter(highlight().field("field1")); diff --git a/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java index 
ae1f7ac5aa4..73906b2ed83 100644 --- a/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java @@ -57,44 +57,44 @@ public class ExistsIT extends ESIntegTestCase { public void testExists() throws Exception { XContentBuilder mapping = XContentBuilder.builder(JsonXContent.jsonXContent) - .startObject() + .startObject() .startObject("type") - .startObject(FieldNamesFieldMapper.NAME) - .field("enabled", randomBoolean()) + .startObject(FieldNamesFieldMapper.NAME) + .field("enabled", randomBoolean()) + .endObject() + .startObject("properties") + .startObject("foo") + .field("type", "string") + .endObject() + .startObject("bar") + .field("type", "object") + .startObject("properties") + .startObject("foo") + .field("type", "string") + .endObject() + .startObject("bar") + .field("type", "object") + .startObject("properties") + .startObject("bar") + .field("type", "string") + .endObject() + .endObject() + .endObject() + .startObject("baz") + .field("type", "long") + .endObject() + .endObject() + .endObject() + .endObject() .endObject() - .startObject("properties") - .startObject("foo") - .field("type", "string") - .endObject() - .startObject("bar") - .field("type", "object") - .startObject("properties") - .startObject("foo") - .field("type", "string") - .endObject() - .startObject("bar") - .field("type", "object") - .startObject("properties") - .startObject("bar") - .field("type", "string") - .endObject() - .endObject() - .endObject() - .startObject("baz") - .field("type", "long") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject(); + .endObject(); assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", mapping)); @SuppressWarnings("unchecked") Map barObject = new HashMap<>(); barObject.put("foo", "bar"); barObject.put("bar", singletonMap("bar", "foo")); - final Map[] sources = new Map[]{ + final Map[] sources = new Map[] { // simple 
property singletonMap("foo", "bar"), // object fields From 8011e18880c071db32473bc1789dd054619d7233 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 10 Dec 2015 16:45:11 +0100 Subject: [PATCH 023/167] apply simple comments --- .../allocator/BalancedShardsAllocator.java | 7 +++--- .../settings/AbstractScopedSettings.java | 16 -------------- .../common/settings/ClusterSettings.java | 22 +++++++++++++++++++ .../common/settings/Setting.java | 1 + .../discovery/DiscoverySettings.java | 8 +++---- 5 files changed, 29 insertions(+), 25 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index da73cdc1455..e6dc9a65efd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -87,18 +87,17 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards @Inject public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) { super(settings); - weightFunction = new WeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings)); + setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings)); setThreshold(THRESHOLD_SETTING.get(settings)); clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction); clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); } - public void setWeightFunction(float indexBalance, float shardBalanceFactor) { + private void setWeightFunction(float indexBalance, float shardBalanceFactor) { weightFunction = new WeightFunction(indexBalance, shardBalanceFactor); } - - public void 
setThreshold(float threshold) { + private void setThreshold(float threshold) { this.threshold = threshold; } diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index af82f97bf0c..7fc8cebd31b 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -128,22 +128,6 @@ public abstract class AbstractScopedSettings extends AbstractComponent { } } } - - try { - for (Map.Entry entry : newSettings.getAsMap().entrySet()) { - if (entry.getKey().startsWith("logger.")) { - String component = entry.getKey().substring("logger.".length()); - if ("_root".equals(component)) { - ESLoggerFactory.getRootLogger().setLevel(entry.getValue()); - } else { - ESLoggerFactory.getLogger(component).setLevel(entry.getValue()); - } - } - } - } catch (Exception e) { - logger.warn("failed to refresh settings for [{}]", e, "logger"); - } - return lastSettingsApplied = newSettings; } diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 602081a1d47..e7b7eb9960d 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.*; import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; @@ -48,6 
+49,27 @@ public final class ClusterSettings extends AbstractScopedSettings { super(settings, settingsSet, Setting.Scope.CLUSTER); } + + @Override + public synchronized Settings applySettings(Settings newSettings) { + Settings settings = super.applySettings(newSettings); + try { + for (Map.Entry entry : settings.getAsMap().entrySet()) { + if (entry.getKey().startsWith("logger.")) { + String component = entry.getKey().substring("logger.".length()); + if ("_root".equals(component)) { + ESLoggerFactory.getRootLogger().setLevel(entry.getValue()); + } else { + ESLoggerFactory.getLogger(component).setLevel(entry.getValue()); + } + } + } + } catch (Exception e) { + logger.warn("failed to refresh settings for [{}]", e, "logger"); + } + return settings; + } + /** * Returns true if the settings is a logger setting. */ diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index d82a8cb1cd5..1f4e8ed04b1 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -45,6 +45,7 @@ public class Setting extends ToXContentToBytes { private final Scope scope; public Setting(String key, Function defaultValue, Function parser, boolean dynamic, Scope scope) { + assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null"; this.key = key; this.defaultValue = defaultValue; this.parser = parser; diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index 519a2330353..6689d9c8688 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -36,6 +36,9 @@ import java.util.EnumSet; */ public class DiscoverySettings extends AbstractComponent { + public final 
static int NO_MASTER_BLOCK_ID = 2; + public final static ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); + public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); /** * sets the timeout for a complete publishing cycle, including both sending and committing. the master * will continute to process the next cluster state update after this time has elapsed @@ -50,11 +53,6 @@ public class DiscoverySettings extends AbstractComponent { public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.CLUSTER); public static final Setting PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.CLUSTER); - public final static int NO_MASTER_BLOCK_ID = 2; - - public final static ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); - public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); - private volatile ClusterBlock noMasterBlock; private volatile TimeValue publishTimeout; From 86a18a08fb5b3d8b4f000c71b0f69acea80b0b6f Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 10 Dec 2015 18:01:00 +0100 Subject: [PATCH 024/167] Make SettingsUpdater less stateful --- .../settings/AbstractScopedSettings.java | 111 +++++++------ .../common/settings/Setting.java | 157 +++++++----------- .../common/settings/SettingTests.java | 103 +++++------- 3 files changed, 159 insertions(+), 212 
deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 7fc8cebd31b..e3de252e083 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -32,7 +32,7 @@ import java.util.function.Consumer; * This service offers transactional application of updates settings. */ public abstract class AbstractScopedSettings extends AbstractComponent { - private Settings lastSettingsApplied; + private Settings lastSettingsApplied = Settings.EMPTY; private final List settingUpdaters = new ArrayList<>(); private final Map> groupSettings = new HashMap<>(); private final Map> keySettings = new HashMap<>(); @@ -62,29 +62,22 @@ public abstract class AbstractScopedSettings extends AbstractComponent { * method will not change any settings but will fail if any of the settings can't be applied. */ public synchronized Settings dryRun(Settings settings) { - final Settings build = Settings.builder().put(this.settings).put(settings).build(); - try { - List exceptions = new ArrayList<>(); - for (SettingUpdater settingUpdater : settingUpdaters) { - try { - settingUpdater.prepareApply(build); - } catch (RuntimeException ex) { - exceptions.add(ex); - logger.debug("failed to prepareCommit settings for [{}]", ex, settingUpdater); - } - } - // here we are exhaustive and record all settings that failed. 
- ExceptionsHelper.rethrowAndSuppress(exceptions); - } finally { - for (SettingUpdater settingUpdater : settingUpdaters) { - try { - settingUpdater.rollback(); - } catch (Exception e) { - logger.error("failed to rollback settings for [{}]", e, settingUpdater); + final Settings current = Settings.builder().put(this.settings).put(settings).build(); + final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build(); + List exceptions = new ArrayList<>(); + for (SettingUpdater settingUpdater : settingUpdaters) { + try { + if (settingUpdater.hasChanged(current, previous)) { + settingUpdater.getValue(current, previous); } + } catch (RuntimeException ex) { + exceptions.add(ex); + logger.debug("failed to prepareCommit settings for [{}]", ex, settingUpdater); } } - return build; + // here we are exhaustive and record all settings that failed. + ExceptionsHelper.rethrowAndSuppress(exceptions); + return current; } /** @@ -99,34 +92,25 @@ public abstract class AbstractScopedSettings extends AbstractComponent { // nothing changed in the settings, ignore return newSettings; } - final Settings build = Settings.builder().put(this.settings).put(newSettings).build(); - boolean success = false; + final Settings current = Settings.builder().put(this.settings).put(newSettings).build(); + final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build(); try { + List applyRunnables = new ArrayList<>(); for (SettingUpdater settingUpdater : settingUpdaters) { try { - settingUpdater.prepareApply(build); + applyRunnables.add(settingUpdater.updater(current, previous)); } catch (Exception ex) { logger.warn("failed to prepareCommit settings for [{}]", ex, settingUpdater); throw ex; } } - for (SettingUpdater settingUpdater : settingUpdaters) { - settingUpdater.apply(); + for (Runnable settingUpdater : applyRunnables) { + settingUpdater.run(); } - success = true; } catch (Exception ex) { logger.warn("failed to apply 
settings", ex); throw ex; } finally { - if (success == false) { - for (SettingUpdater settingUpdater : settingUpdaters) { - try { - settingUpdater.rollback(); - } catch (Exception e) { - logger.error("failed to refresh settings for [{}]", e, settingUpdater); - } - } - } } return lastSettingsApplied = newSettings; } @@ -141,7 +125,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { if (setting != get(setting.getKey())) { throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); } - this.settingUpdaters.add(setting.newUpdater(consumer, logger, settings, predicate)); + this.settingUpdaters.add(setting.newUpdater(consumer, logger, predicate)); } /** @@ -159,7 +143,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { if (b != get(b.getKey())) { throw new IllegalArgumentException("Setting is not registered for key [" + b.getKey() + "]"); } - this.settingUpdaters.add(Setting.compoundUpdater(consumer, a, b, logger, settings)); + this.settingUpdaters.add(Setting.compoundUpdater(consumer, a, b, logger)); } /** @@ -176,24 +160,51 @@ public abstract class AbstractScopedSettings extends AbstractComponent { * Transactional interface to update settings. * @see Setting */ - public interface SettingUpdater { - /** - * Prepares applying the given settings to this updater. All the heavy lifting like parsing and validation - * happens in this method. Yet the actual setting should not be changed by this call. 
- * @param settings the settings to apply - * @return true if this updater will update a setting on calling {@link #apply()} otherwise false - */ - boolean prepareApply(Settings settings); + public interface SettingUpdater { /** - * Applies the settings passed to {@link #prepareApply(Settings)} + * Returns true if this updaters setting has changed with the current update + * @param current the current settings + * @param previous the previous setting + * @return true if this updaters setting has changed with the current update */ - void apply(); + boolean hasChanged(Settings current, Settings previous); /** - * Rolls back to the state before {@link #prepareApply(Settings)} was called. All internal prepared state is cleared after this call. + * Returns the instance value for the current settings. This method is stateless and idempotent. */ - void rollback(); + T getValue(Settings current, Settings previous); + + /** + * Applies the given value to the updater. This methods will actually run the update. + */ + void apply(T value, Settings current, Settings previous); + + /** + * Updates this updaters value if it has changed. + * @return true iff the value has been updated. + */ + default boolean apply(Settings current, Settings previous) { + if (hasChanged(current, previous)) { + T value = getValue(current, previous); + apply(value, current, previous); + return true; + } + return false; + } + + /** + * Returns a callable runnable that calls {@link #apply(Object, Settings, Settings)} if the settings + * actually changed. This allows to defer the update to a later point in time while keeping type safety. + * If the value didn't change the returned runnable is a noop. 
+ */ + default Runnable updater(Settings current, Settings previous) { + if (hasChanged(current, previous)) { + T value = getValue(current, previous); + return () -> { apply(value, current, previous);}; + } + return () -> {}; + } } /** diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 1f4e8ed04b1..b164c906e7a 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.settings; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.ByteSizeValue; @@ -150,13 +151,13 @@ public class Setting extends ToXContentToBytes { INDEX; } - final AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings) { - return newUpdater(consumer, logger, settings, (s) -> {}); + final AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger) { + return newUpdater(consumer, logger, (s) -> {}); } - AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Consumer accept) { + AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Consumer accept) { if (isDynamic()) { - return new Updater(consumer, logger, settings, accept); + return new Updater(consumer, logger, accept); } else { throw new IllegalStateException("setting [" + getKey() + "] is not dynamic"); } @@ -166,39 +167,23 @@ public class Setting extends ToXContentToBytes { * this is used for settings that depend on each other... 
see {@link org.elasticsearch.common.settings.AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} and it's * usage for details. */ - static AbstractScopedSettings.SettingUpdater compoundUpdater(final BiConsumer consumer, final Setting aSettting, final Setting bSetting, ESLogger logger, Settings settings) { - final AtomicReference aRef = new AtomicReference<>(); - final AtomicReference bRef = new AtomicReference<>(); - final AbstractScopedSettings.SettingUpdater aSettingUpdater = aSettting.newUpdater(aRef::set, logger, settings); - final AbstractScopedSettings.SettingUpdater bSettingUpdater = bSetting.newUpdater(bRef::set, logger, settings); - return new AbstractScopedSettings.SettingUpdater() { - boolean aHasChanged = false; - boolean bHasChanged = false; + static AbstractScopedSettings.SettingUpdater> compoundUpdater(final BiConsumer consumer, final Setting aSettting, final Setting bSetting, ESLogger logger) { + final AbstractScopedSettings.SettingUpdater aSettingUpdater = aSettting.newUpdater(null, logger); + final AbstractScopedSettings.SettingUpdater bSettingUpdater = bSetting.newUpdater(null, logger); + return new AbstractScopedSettings.SettingUpdater>() { @Override - public boolean prepareApply(Settings settings) { - aHasChanged = aSettingUpdater.prepareApply(settings); - bHasChanged = bSettingUpdater.prepareApply(settings); - return aHasChanged || bHasChanged; + public boolean hasChanged(Settings current, Settings previous) { + return aSettingUpdater.hasChanged(current, previous) || bSettingUpdater.hasChanged(current, previous); } @Override - public void apply() { - aSettingUpdater.apply(); - bSettingUpdater.apply(); - if (aHasChanged || bHasChanged) { - consumer.accept(aRef.get(), bRef.get()); - } + public Tuple getValue(Settings current, Settings previous) { + return new Tuple<>(aSettingUpdater.getValue(current, previous), bSettingUpdater.getValue(current, previous)); } @Override - public void rollback() { - try { - 
aRef.set(null); - aSettingUpdater.rollback(); - } finally { - bRef.set(null); - bSettingUpdater.rollback(); - } + public void apply(Tuple value, Settings current, Settings previous) { + consumer.accept(value.v1(), value.v2()); } @Override @@ -209,63 +194,47 @@ public class Setting extends ToXContentToBytes { } - private class Updater implements AbstractScopedSettings.SettingUpdater { + private class Updater implements AbstractScopedSettings.SettingUpdater { private final Consumer consumer; private final ESLogger logger; private final Consumer accept; - private String value; - private boolean commitPending; - private String pendingValue; - private T valueInstance; - public Updater(Consumer consumer, ESLogger logger, Settings settings, Consumer accept) { + public Updater(Consumer consumer, ESLogger logger, Consumer accept) { this.consumer = consumer; this.logger = logger; - value = getRaw(settings); this.accept = accept; } - - public boolean prepareApply(Settings settings) { - final String newValue = getRaw(settings); - if (value.equals(newValue) == false) { - T inst = get(settings); - try { - accept.accept(inst); - } catch (Exception | AssertionError e) { - throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + value + "] to [" + getRaw(settings) + "]", e); - } - pendingValue = newValue; - valueInstance = inst; - commitPending = true; - - } else { - commitPending = false; - } - return commitPending; - } - - public void apply() { - if (commitPending) { - logger.info("update [{}] from [{}] to [{}]", key, value, pendingValue); - value = pendingValue; - consumer.accept(valueInstance); - } - commitPending = false; - valueInstance = null; - pendingValue = null; - } - - public void rollback() { - commitPending = false; - valueInstance = null; - pendingValue = null; - } - @Override public String toString() { return "Updater for: " + Setting.this.toString(); } + + @Override + public boolean hasChanged(Settings current, Settings previous) { + 
final String newValue = getRaw(current); + final String value = getRaw(previous); + return value.equals(newValue) == false; + } + + @Override + public T getValue(Settings current, Settings previous) { + final String newValue = getRaw(current); + final String value = getRaw(previous); + T inst = get(current); + try { + accept.accept(inst); + } catch (Exception | AssertionError e) { + throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + value + "] to [" + newValue + "]", e); + } + return inst; + } + + @Override + public void apply(T value, Settings current, Settings previous) { + logger.info("update [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current)); + consumer.accept(value); + } } @@ -329,43 +298,35 @@ public class Setting extends ToXContentToBytes { } @Override - public AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Settings settings, Consumer accept) { + public AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Consumer accept) { if (isDynamic() == false) { throw new IllegalStateException("setting [" + getKey() + "] is not dynamic"); } final Setting setting = this; - return new AbstractScopedSettings.SettingUpdater() { - private Settings pendingSettings; - private Settings committedSettings = get(settings); + return new AbstractScopedSettings.SettingUpdater() { @Override - public boolean prepareApply(Settings settings) { - Settings currentSettings = get(settings); - if (currentSettings.equals(committedSettings) == false) { - try { - accept.accept(currentSettings); - } catch (Exception | AssertionError e) { - throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + committedSettings.getAsMap() + "] to [" + currentSettings.getAsMap() + "]", e); - } - pendingSettings = currentSettings; - return true; - } else { - return false; - } + public boolean hasChanged(Settings current, Settings previous) { + Settings 
currentSettings = get(current); + Settings previousSettings = get(previous); + return currentSettings.equals(previousSettings) == false; } @Override - public void apply() { - if (pendingSettings != null) { - consumer.accept(pendingSettings); - committedSettings = pendingSettings; + public Settings getValue(Settings current, Settings previous) { + Settings currentSettings = get(current); + Settings previousSettings = get(previous); + try { + accept.accept(currentSettings); + } catch (Exception | AssertionError e) { + throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + previousSettings.getAsMap() + "] to [" + currentSettings.getAsMap() + "]", e); } - pendingSettings = null; + return currentSettings; } @Override - public void rollback() { - pendingSettings = null; + public void apply(Settings value, Settings current, Settings previous) { + consumer.accept(value); } @Override diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 3f048e454a5..d8ac616eccb 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.settings; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -42,38 +43,33 @@ public class SettingTests extends ESTestCase { ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); assertEquals(byteSizeValue.bytes(), 1024); AtomicReference value = new AtomicReference<>(null); - ClusterSettings.SettingUpdater settingUpdater = byteSizeValueSetting.newUpdater(value::set, logger, Settings.EMPTY); + ClusterSettings.SettingUpdater settingUpdater = 
byteSizeValueSetting.newUpdater(value::set, logger); try { - settingUpdater.prepareApply(Settings.builder().put("a.byte.size", 12).build()); + settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY); fail("no unit"); } catch (ElasticsearchParseException ex) { assertEquals("failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized", ex.getMessage()); } - assertTrue(settingUpdater.prepareApply(Settings.builder().put("a.byte.size", "12b").build())); - settingUpdater.apply(); + assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "12b").build(), Settings.EMPTY)); assertEquals(new ByteSizeValue(12), value.get()); } public void testSimpleUpdate() { Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); AtomicReference atomicBoolean = new AtomicReference<>(null); - ClusterSettings.SettingUpdater settingUpdater = booleanSetting.newUpdater(atomicBoolean::set, logger, Settings.EMPTY); + ClusterSettings.SettingUpdater settingUpdater = booleanSetting.newUpdater(atomicBoolean::set, logger); Settings build = Settings.builder().put("foo.bar", false).build(); - settingUpdater.prepareApply(build); - assertNull(atomicBoolean.get()); - settingUpdater.rollback(); + settingUpdater.apply(build, Settings.EMPTY); assertNull(atomicBoolean.get()); build = Settings.builder().put("foo.bar", true).build(); - settingUpdater.prepareApply(build); - assertNull(atomicBoolean.get()); - settingUpdater.apply(); + settingUpdater.apply(build, Settings.EMPTY); assertTrue(atomicBoolean.get()); // try update bogus value build = Settings.builder().put("foo.bar", "I am not a boolean").build(); try { - settingUpdater.prepareApply(build); + settingUpdater.apply(build, Settings.EMPTY); fail("not a boolean"); } catch (IllegalArgumentException ex) { assertEquals("Failed to parse value [I am not a boolean] for setting [foo.bar]", ex.getMessage()); @@ -85,7 +81,7 @@ public class 
SettingTests extends ESTestCase { assertFalse(booleanSetting.isGroupSetting()); AtomicReference atomicBoolean = new AtomicReference<>(null); try { - booleanSetting.newUpdater(atomicBoolean::set, logger, Settings.EMPTY); + booleanSetting.newUpdater(atomicBoolean::set, logger); fail("not dynamic"); } catch (IllegalStateException ex) { assertEquals("setting [foo.bar] is not dynamic", ex.getMessage()); @@ -96,11 +92,9 @@ public class SettingTests extends ESTestCase { Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); AtomicReference ab1 = new AtomicReference<>(null); AtomicReference ab2 = new AtomicReference<>(null); - ClusterSettings.SettingUpdater settingUpdater = booleanSetting.newUpdater(ab1::set, logger, Settings.EMPTY); - settingUpdater.prepareApply(Settings.builder().put("foo.bar", true).build()); - assertNull(ab1.get()); - assertNull(ab2.get()); - settingUpdater.apply(); + ClusterSettings.SettingUpdater settingUpdater = booleanSetting.newUpdater(ab1::set, logger); + ClusterSettings.SettingUpdater settingUpdater2 = booleanSetting.newUpdater(ab2::set, logger); + settingUpdater.apply(Settings.builder().put("foo.bar", true).build(), Settings.EMPTY); assertTrue(ab1.get()); assertNull(ab2.get()); } @@ -124,40 +118,22 @@ public class SettingTests extends ESTestCase { assertFalse(setting.isGroupSetting()); ref.set(setting.get(Settings.EMPTY)); ComplexType type = ref.get(); - ClusterSettings.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY); - assertFalse(settingUpdater.prepareApply(Settings.EMPTY)); - settingUpdater.apply(); + ClusterSettings.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger); + assertFalse(settingUpdater.apply(Settings.EMPTY, Settings.EMPTY)); assertSame("no update - type has not changed", type, ref.get()); // change from default - assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.bar", "2").build())); - settingUpdater.apply(); + 
assertTrue(settingUpdater.apply(Settings.builder().put("foo.bar", "2").build(), Settings.EMPTY)); assertNotSame("update - type has changed", type, ref.get()); assertEquals("2", ref.get().foo); // change back to default... - assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.bar.baz", "2").build())); - settingUpdater.apply(); + assertTrue(settingUpdater.apply(Settings.EMPTY, Settings.builder().put("foo.bar", "2").build())); assertNotSame("update - type has changed", type, ref.get()); assertEquals("", ref.get().foo); } - public void testRollback() { - Setting integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.CLUSTER); - assertFalse(integerSetting.isGroupSetting()); - AtomicReference ref = new AtomicReference<>(null); - ClusterSettings.SettingUpdater settingUpdater = integerSetting.newUpdater(ref::set, logger, Settings.EMPTY); - assertNull(ref.get()); - assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.int.bar", "2").build())); - settingUpdater.rollback(); - settingUpdater.apply(); - assertNull(ref.get()); - assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.int.bar", "2").build())); - settingUpdater.apply(); - assertEquals(2, ref.get().intValue()); - } - public void testType() { Setting integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.CLUSTER); assertEquals(integerSetting.getScope(), Setting.Scope.CLUSTER); @@ -169,10 +145,11 @@ public class SettingTests extends ESTestCase { AtomicReference ref = new AtomicReference<>(null); Setting setting = Setting.groupSetting("foo.bar.", true, Setting.Scope.CLUSTER); assertTrue(setting.isGroupSetting()); - ClusterSettings.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY); + ClusterSettings.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger); - assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").put("foo.bar.3.value", 
"3").build())); - settingUpdater.apply(); + Settings currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").put("foo.bar.3.value", "3").build(); + Settings previousInput = Settings.EMPTY; + assertTrue(settingUpdater.apply(currentInput, previousInput)); assertNotNull(ref.get()); Settings settings = ref.get(); Map asMap = settings.getAsGroups(); @@ -181,14 +158,16 @@ public class SettingTests extends ESTestCase { assertEquals(asMap.get("2").get("value"), "2"); assertEquals(asMap.get("3").get("value"), "3"); + previousInput = currentInput; + currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").put("foo.bar.3.value", "3").build(); Settings current = ref.get(); - assertFalse(settingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").put("foo.bar.3.value", "3").build())); - settingUpdater.apply(); + assertFalse(settingUpdater.apply(currentInput, previousInput)); assertSame(current, ref.get()); + previousInput = currentInput; + currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build(); // now update and check that we got it - assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build())); - settingUpdater.apply(); + assertTrue(settingUpdater.apply(currentInput, previousInput)); assertNotSame(current, ref.get()); asMap = ref.get().getAsGroups(); @@ -196,9 +175,10 @@ public class SettingTests extends ESTestCase { assertEquals(asMap.get("1").get("value"), "1"); assertEquals(asMap.get("2").get("value"), "2"); + previousInput = currentInput; + currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "4").build(); // now update and check that we got it - assertTrue(settingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "4").build())); - settingUpdater.apply(); + 
assertTrue(settingUpdater.apply(currentInput, previousInput)); assertNotSame(current, ref.get()); asMap = ref.get().getAsGroups(); @@ -209,9 +189,9 @@ public class SettingTests extends ESTestCase { assertTrue(setting.match("foo.bar.baz")); assertFalse(setting.match("foo.baz.bar")); - ClusterSettings.SettingUpdater predicateSettingUpdater = setting.newUpdater(ref::set, logger, Settings.EMPTY, (s) -> assertFalse(true)); + ClusterSettings.SettingUpdater predicateSettingUpdater = setting.newUpdater(ref::set, logger,(s) -> assertFalse(true)); try { - predicateSettingUpdater.prepareApply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build()); + predicateSettingUpdater.apply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build(), Settings.EMPTY); fail("not accepted"); } catch (IllegalArgumentException ex) { assertEquals(ex.getMessage(), "illegal value can't update [foo.bar.] from [{}] to [{1.value=1, 2.value=2}]"); @@ -243,32 +223,27 @@ public class SettingTests extends ESTestCase { Composite c = new Composite(); Setting a = Setting.intSetting("foo.int.bar.a", 1, true, Setting.Scope.CLUSTER); Setting b = Setting.intSetting("foo.int.bar.b", 1, true, Setting.Scope.CLUSTER); - ClusterSettings.SettingUpdater settingUpdater = Setting.compoundUpdater(c::set, a, b, logger, Settings.EMPTY); - assertFalse(settingUpdater.prepareApply(Settings.EMPTY)); - settingUpdater.apply(); + ClusterSettings.SettingUpdater> settingUpdater = Setting.compoundUpdater(c::set, a, b, logger); + assertFalse(settingUpdater.apply(Settings.EMPTY, Settings.EMPTY)); assertNull(c.a); assertNull(c.b); Settings build = Settings.builder().put("foo.int.bar.a", 2).build(); - assertTrue(settingUpdater.prepareApply(build)); - settingUpdater.apply(); + assertTrue(settingUpdater.apply(build, Settings.EMPTY)); assertEquals(2, c.a.intValue()); - assertNull(c.b); + assertEquals(1, c.b.intValue()); Integer aValue = c.a; - 
assertFalse(settingUpdater.prepareApply(build)); - settingUpdater.apply(); + assertFalse(settingUpdater.apply(build, build)); assertSame(aValue, c.a); - + Settings previous = build; build = Settings.builder().put("foo.int.bar.a", 2).put("foo.int.bar.b", 5).build(); - assertTrue(settingUpdater.prepareApply(build)); - settingUpdater.apply(); + assertTrue(settingUpdater.apply(build, previous)); assertEquals(2, c.a.intValue()); assertEquals(5, c.b.intValue()); // reset to default - assertTrue(settingUpdater.prepareApply(Settings.EMPTY)); - settingUpdater.apply(); + assertTrue(settingUpdater.apply(Settings.EMPTY, build)); assertEquals(1, c.a.intValue()); assertEquals(1, c.b.intValue()); From 9e37322377315d7775d67a409e926009c1c9cba0 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 10 Dec 2015 18:02:51 +0100 Subject: [PATCH 025/167] add javadocs --- .../common/settings/AbstractScopedSettings.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index e3de252e083..ff57bcec2b5 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -120,12 +120,14 @@ public abstract class AbstractScopedSettings extends AbstractComponent { *

* Note: Only settings registered in {@link SettingsModule} can be changed dynamically. *

+ * @param validator an additional validator that is only applied to updates of this setting. + * This is useful to add additional validation to settings at runtime compared to at startup time. */ - public synchronized void addSettingsUpdateConsumer(Setting setting, Consumer consumer, Consumer predicate) { + public synchronized void addSettingsUpdateConsumer(Setting setting, Consumer consumer, Consumer validator) { if (setting != get(setting.getKey())) { throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); } - this.settingUpdaters.add(setting.newUpdater(consumer, logger, predicate)); + this.settingUpdaters.add(setting.newUpdater(consumer, logger, validator)); } /** From 8c7e142eb0fe6bf94a795b024fb334f5802c7921 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 10 Dec 2015 18:05:51 +0100 Subject: [PATCH 026/167] add javadocs --- .../java/org/elasticsearch/common/settings/Setting.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index b164c906e7a..d1be23d3d9b 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -45,6 +45,14 @@ public class Setting extends ToXContentToBytes { private final boolean dynamic; private final Scope scope; + /** + * Creates a new Setting instance + * @param key the settings key for this setting. + * @param defaultValue a default value function that returns the default values string representation. + * @param parser a parser that parses the string rep into a complex datatype. 
+ * @param dynamic true iff this setting is dynamically updateable + * @param scope the scope of this setting */ public Setting(String key, Function defaultValue, Function parser, boolean dynamic, Scope scope) { assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null"; this.key = key; From 3383c24be0a137929f7240edd8f984b0d3df5792 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 7 Dec 2015 18:46:26 -0500 Subject: [PATCH 027/167] Remove and forbid use of Collections#shuffle(List) and Random#() This commit removes and now forbids all uses of Collections#shuffle(List) and Random#() across the codebase. The rationale for removing and forbidding these methods is to increase test reproducibility. As these methods use non-reproducible seeds, production code and tests that rely on these methods contribute to non-reproducibility of tests. Instead of Collections#shuffle(List) the method Collections#shuffle(List, Random) can be used. All that is required then is a reproducible source of randomness. Consequently, the utility class Randomness has been added to assist in creating reproducible sources of randomness. Instead of Random#(), Random#(long) with a reproducible seed or the aforementioned Randomness class can be used. 
Closes #15287 --- .../resources/forbidden/all-signatures.txt | 3 + .../cluster/routing/RoutingNodes.java | 13 +- .../routing/allocation/AllocationService.java | 7 +- .../org/elasticsearch/common/Randomness.java | 132 ++++++++++++++++++ .../discovery/DiscoveryService.java | 9 +- .../elasticsearch/plugins/PluginManager.java | 7 +- .../lucene/queries/BlendedTermQueryTests.java | 21 +-- .../IndicesShardStoreResponseTests.java | 14 +- .../OldIndexBackwardsCompatibilityIT.java | 19 +-- .../node/DiscoveryNodeFiltersTests.java | 8 +- .../cluster/routing/DelayedAllocationIT.java | 2 +- .../allocation/AddIncrementallyTests.java | 3 +- .../NodeVersionAllocationDeciderTests.java | 11 +- .../common/lucene/LuceneTests.java | 16 +-- .../lucene/index/FreqTermsEnumTests.java | 19 +-- .../common/util/CollectionUtilsTests.java | 13 +- .../concurrent/PrioritizedExecutorsTests.java | 10 +- .../DiscoveryWithServiceDisruptionsIT.java | 6 +- .../zen/ElectMasterServiceTests.java | 8 +- .../zen/NodeJoinControllerTests.java | 6 +- .../discovery/zen/ZenPingTests.java | 2 +- .../PublishClusterStateActionTests.java | 30 +--- .../gateway/MetaDataStateFormatTests.java | 29 +--- .../mapper/all/SimpleAllMapperTests.java | 15 +- .../core/TokenCountFieldMapperTests.java | 8 +- .../mapper/multifield/MultiFieldTests.java | 2 +- .../index/store/CorruptedFileIT.java | 37 ++--- .../indices/recovery/RecoveryStateTests.java | 19 +-- .../recovery/TruncatedRecoveryIT.java | 3 +- .../suggest/CompletionSuggestSearchIT.java | 4 +- .../messy/tests/SimpleSortTests.java | 48 +------ .../cloud/aws/AwsEc2ServiceImpl.java | 3 +- .../elasticsearch/tribe/TribeUnitTests.java | 21 ++- .../org/elasticsearch/tribe/elasticsearch.yml | 4 +- .../test/ESAllocationTestCase.java | 3 +- .../elasticsearch/test/ESIntegTestCase.java | 4 +- .../org/elasticsearch/test/ESTestCase.java | 4 +- 37 files changed, 251 insertions(+), 312 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/common/Randomness.java diff --git 
a/buildSrc/src/main/resources/forbidden/all-signatures.txt b/buildSrc/src/main/resources/forbidden/all-signatures.txt index 4bc24c9a2af..c1e65cbaf22 100644 --- a/buildSrc/src/main/resources/forbidden/all-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/all-signatures.txt @@ -123,3 +123,6 @@ java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a r java.util.Collections#EMPTY_LIST java.util.Collections#EMPTY_MAP java.util.Collections#EMPTY_SET + +java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness +java.util.Random#() @ Use org.elasticsearch.common.random.Randomness#create for reproducible sources of randomness diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index badf70a191e..5d17a59339a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -27,18 +27,11 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.index.shard.ShardId; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import java.util.function.Predicate; /** @@ -671,7 +664,7 @@ public class RoutingNodes implements Iterable { } public void shuffle() { - Collections.shuffle(unassigned); + Randomness.shuffle(unassigned); } /** diff --git 
a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index feafb76a5f2..2268bf1d995 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -39,15 +39,12 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.Set; +import java.util.*; import java.util.function.Function; import java.util.stream.Collectors; diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java new file mode 100644 index 00000000000..770b6554cf6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/Randomness.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common; + +import org.elasticsearch.common.settings.Settings; + +import java.lang.reflect.Method; +import java.security.SecureRandom; +import java.util.Collections; +import java.util.List; +import java.util.Random; + +/** + * Provides factory methods for producing reproducible sources of + * randomness. Reproducible sources of randomness contribute to + * reproducible tests. When running the Elasticsearch test suite, the + * test runner will establish a global random seed accessible via the + * system property "tests.seed". By seeding a random number generator + * with this global seed, we ensure that instances of Random produced + * with this class produce reproducible sources of randomness under + * when running under the Elasticsearch test suite. Alternatively, + * a reproducible source of randomness can be produced by providing a + * setting a reproducible seed. When running the Elasticsearch server + * process, non-reproducible sources of randomness are provided (unless + * a setting is provided for a module that exposes a seed setting (e.g., + * DiscoveryService#SETTING_DISCOVERY_SEED)). 
+ */ +public final class Randomness { + private static final SecureRandom SR = new SecureRandom(); + private static final Method currentMethod; + private static final Method getRandomMethod; + + static { + Method maybeCurrentMethod; + Method maybeGetRandomMethod; + try { + Class clazz = Class.forName("com.carrotsearch.randomizedtesting.RandomizedContext"); + maybeCurrentMethod = clazz.getMethod("current"); + maybeGetRandomMethod = clazz.getMethod("getRandom"); + } catch (Throwable t) { + maybeCurrentMethod = null; + maybeGetRandomMethod = null; + } + currentMethod = maybeCurrentMethod; + getRandomMethod = maybeGetRandomMethod; + } + + private Randomness() {} + + /** + * Provides a reproducible source of randomness seeded by a long + * seed in the settings with the key setting. + * + * @param settings the settings containing the seed + * @param setting the key to access the seed + * @return a reproducible source of randomness + */ + public static Random get(Settings settings, String setting) { + Long maybeSeed = settings.getAsLong(setting, null); + if (maybeSeed != null) { + return new Random(maybeSeed); + } else { + return get(); + } + } + + /** + * Provides a source of randomness that is reproducible when + * running under the Elasticsearch test suite, and otherwise + * produces a non-reproducible source of randomness. Reproducible + * sources of randomness are created when the system property + * "tests.seed" is set and the security policy allows reading this + * system property. Otherwise, non-reproducible sources of + * randomness are created. 
+ * + * @return a source of randomness + * @throws IllegalStateException if running tests but was not able + * to acquire an instance of Random from + * RandomizedContext or tests are + * running but tests.seed is not set + */ + public static Random get() { + if (currentMethod != null && getRandomMethod != null) { + try { + Object randomizedContext = currentMethod.invoke(null); + return (Random) getRandomMethod.invoke(randomizedContext); + } catch (ReflectiveOperationException e) { + // unexpected, bail + throw new IllegalStateException("running tests but failed to invoke RandomizedContext#getRandom", e); + } + } else { + return getWithoutSeed(); + } + } + + private static ThreadLocal LOCAL; + + private static Random getWithoutSeed() { + assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; + if (LOCAL.get() == null) { + byte[] bytes = SR.generateSeed(8); + long accumulator = 0; + for (int i = 0; i < bytes.length; i++) { + accumulator = (accumulator << 8) + bytes[i] & 0xFFL; + } + final long seed = accumulator; + LOCAL = ThreadLocal.withInitial(() -> new Random(seed)); + } + return LOCAL.get(); + } + + public static void shuffle(List list) { + Collections.shuffle(list, get()); + } +} diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java index eeba9baa32a..a82099658ea 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java @@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -41,7 +42,6 @@ public class DiscoveryService extends AbstractLifecycleComponent MODULES = unmodifiableSet(newHashSet( "lang-expression", "lang-groovy")); @@ -124,7 +125,7 @@ public class PluginManager { checkForForbiddenName(pluginHandle.name); } else { // if we have no name but url, use temporary name that will be overwritten later - pluginHandle = new PluginHandle("temp_name" + new Random().nextInt(), null, null); + pluginHandle = new PluginHandle("temp_name" + Randomness.get().nextInt(), null, null); } Path pluginFile = download(pluginHandle, terminal); @@ -224,7 +225,7 @@ public class PluginManager { PluginInfo info = PluginInfo.readFromProperties(root); terminal.println(VERBOSE, "%s", info); - // don't let luser install plugin as a module... + // don't let luser install plugin as a module... // they might be unavoidably in maven central and are packaged up the same way) if (MODULES.contains(info.getName())) { throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); diff --git a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index 0f29ed5a2f7..a287ec119e7 100644 --- a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -23,19 +23,8 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.MultiReader; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.BooleanClause; -import 
org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.DisjunctionMaxQuery; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.QueryUtils; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.TopDocs; +import org.apache.lucene.index.*; +import org.apache.lucene.search.*; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.DefaultSimilarity; import org.apache.lucene.search.similarities.Similarity; @@ -44,11 +33,7 @@ import org.apache.lucene.util.TestUtil; import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java index c862583a3f7..cf197a27faf 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java @@ -26,20 +26,12 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.transport.DummyTransportAddress; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; +import 
org.elasticsearch.common.xcontent.*; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.NodeDisconnectedException; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static org.hamcrest.Matchers.equalTo; @@ -119,7 +111,7 @@ public class IndicesShardStoreResponseTests extends ESTestCase { orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, new IOException("corrupted"))); List storeStatuses = new ArrayList<>(orderedStoreStatuses); - Collections.shuffle(storeStatuses); + Collections.shuffle(storeStatuses, random()); CollectionUtil.timSort(storeStatuses); assertThat(storeStatuses, equalTo(orderedStoreStatuses)); } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 137c6c5b2c2..297099e45ac 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -32,8 +32,6 @@ import org.elasticsearch.action.admin.indices.upgrade.UpgradeIT; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; @@ -62,23 +60,12 @@ import org.junit.Before; import java.io.IOException; import java.io.InputStream; -import java.nio.file.DirectoryStream; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import 
java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; +import java.nio.file.*; import java.nio.file.attribute.BasicFileAttributes; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.SortedSet; -import java.util.TreeSet; +import java.util.*; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; // needs at least 2 nodes since it bumps replicas to 1 @@ -271,7 +258,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { public void testOldIndexes() throws Exception { setupCluster(); - Collections.shuffle(indexes, getRandom()); + Collections.shuffle(indexes, random()); for (String index : indexes) { long startTime = System.currentTimeMillis(); logger.info("--> Testing old index " + index); diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java index ef548140192..9a91e1cd562 100644 --- a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java @@ -29,11 +29,7 @@ import org.junit.BeforeClass; import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; @@ -246,7 +242,7 @@ public class DiscoveryNodeFiltersTests extends ESTestCase { private Settings shuffleSettings(Settings source) { Settings.Builder settings = Settings.settingsBuilder(); 
List keys = new ArrayList<>(source.getAsMap().keySet()); - Collections.shuffle(keys, getRandom()); + Collections.shuffle(keys, random()); for (String o : keys) { settings.put(o, source.getAsMap().get(o)); } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index c236ea54878..2d704380ae0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -170,7 +170,7 @@ public class DelayedAllocationIT extends ESIntegTestCase { private String findNodeWithShard() { ClusterState state = client().admin().cluster().prepareState().get().getState(); List startedShards = state.routingTable().shardsWithState(ShardRoutingState.STARTED); - Collections.shuffle(startedShards, getRandom()); + Collections.shuffle(startedShards,random()); return state.nodes().get(startedShards.get(0).currentNodeId()).getName(); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index d10912e69db..ee8bd067008 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -388,7 +387,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { logger.info("Removing [{}] nodes", numNodes); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); ArrayList discoveryNodes = 
CollectionUtils.iterableAsArrayList(clusterState.nodes()); - Collections.shuffle(discoveryNodes, getRandom()); + Collections.shuffle(discoveryNodes, random()); for (DiscoveryNode node : discoveryNodes) { nodes.remove(node.id()); numNodes--; diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 62501cbf9fb..2fe1d85b1f4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -39,15 +39,10 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; -import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; -import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; +import static org.elasticsearch.cluster.routing.ShardRoutingState.*; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.VersionUtils.randomVersion; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; /** * @@ -199,7 +194,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); int numNodes = between(1, 20); if (nodes.size() > numNodes) { - Collections.shuffle(nodes, getRandom()); + Collections.shuffle(nodes, random()); nodes = nodes.subList(0, numNodes); } else { for (int j = nodes.size(); j < numNodes; j++) { diff --git a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java 
b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index fcd2f7d8767..17345fd714f 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -17,20 +17,14 @@ * under the License. */ package org.elasticsearch.common.lucene; + import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.NoDeletionPolicy; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.index.Term; +import org.apache.lucene.index.*; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; @@ -41,11 +35,7 @@ import org.apache.lucene.util.Version; import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java index 96715a05b3c..ad811a38aed 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java @@ -24,13 +24,7 @@ import org.apache.lucene.document.Document; 
import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.Term; +import org.apache.lucene.index.*; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -42,14 +36,7 @@ import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -201,7 +188,7 @@ public class FreqTermsEnumTests extends ESTestCase { for (int i = 0; i < cycles; i++) { List terms = new ArrayList<>(Arrays.asList(this.terms)); - Collections.shuffle(terms, getRandom()); + Collections.shuffle(terms, random()); for (String term : terms) { if (!termsEnum.seekExact(new BytesRef(term))) { assertThat("term : " + term, reference.get(term).docFreq, is(0)); diff --git a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java index fe9ba6b1fcf..4c3612da8e0 100644 --- a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java @@ -25,14 +25,7 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.Counter; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; -import java.util.Arrays; 
-import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.SortedSet; -import java.util.TreeSet; +import java.util.*; import static org.elasticsearch.common.util.CollectionUtils.eagerPartition; import static org.hamcrest.Matchers.equalTo; @@ -80,7 +73,7 @@ public class CollectionUtilsTests extends ESTestCase { array.append(new BytesRef(s)); } if (randomBoolean()) { - Collections.shuffle(tmpList, getRandom()); + Collections.shuffle(tmpList, random()); for (BytesRef ref : tmpList) { array.append(ref); } @@ -111,7 +104,7 @@ public class CollectionUtilsTests extends ESTestCase { array.append(new BytesRef(s)); } if (randomBoolean()) { - Collections.shuffle(values, getRandom()); + Collections.shuffle(values, random()); } int[] indices = new int[array.size()]; for (int i = 0; i < indices.length; i++) { diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java index 1d2d2141166..deac15b50d3 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java @@ -27,13 +27,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.PriorityBlockingQueue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; +import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; @@ -46,7 +40,7 @@ public class PrioritizedExecutorsTests extends ESTestCase { public void 
testPriorityQueue() throws Exception { PriorityBlockingQueue queue = new PriorityBlockingQueue<>(); List priorities = Arrays.asList(Priority.values()); - Collections.shuffle(priorities); + Collections.shuffle(priorities, random()); for (Priority priority : priorities) { queue.add(priority); diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index b14792a2c33..f9778f6438f 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -473,7 +473,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { docsPerIndexer = 1 + randomInt(5); logger.info("indexing " + docsPerIndexer + " docs per indexer during partition"); countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size())); - Collections.shuffle(semaphores); + Collections.shuffle(semaphores, random()); for (Semaphore semaphore : semaphores) { assertThat(semaphore.availablePermits(), equalTo(0)); semaphore.release(docsPerIndexer); @@ -683,7 +683,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureGreen("test"); nodes = new ArrayList<>(nodes); - Collections.shuffle(nodes, getRandom()); + Collections.shuffle(nodes, random()); String isolatedNode = nodes.get(0); String notIsolatedNode = nodes.get(1); @@ -1038,7 +1038,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { new NetworkDisconnectPartition(getRandom()), new SlowClusterStateProcessing(getRandom()) ); - Collections.shuffle(list); + Collections.shuffle(list, random()); setDisruptionScheme(list.get(0)); return list.get(0); } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java index 
e7bded344d9..c4955561905 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java @@ -26,11 +26,7 @@ import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; public class ElectMasterServiceTests extends ESTestCase { @@ -53,7 +49,7 @@ public class ElectMasterServiceTests extends ESTestCase { nodes.add(node); } - Collections.shuffle(nodes, getRandom()); + Collections.shuffle(nodes, random()); return nodes; } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index ea590756a8b..42cb7cf43f4 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -244,7 +244,7 @@ public class NodeJoinControllerTests extends ESTestCase { // add - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] unique master nodes. 
Total of [{}] join requests", initialJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); @@ -269,7 +269,7 @@ public class NodeJoinControllerTests extends ESTestCase { } } - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", finalJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); @@ -316,7 +316,7 @@ public class NodeJoinControllerTests extends ESTestCase { nodesToJoin.add(node); } } - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", initialJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java index 82733d92206..c54489bceba 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java @@ -62,7 +62,7 @@ public class ZenPingTests extends ESTestCase { } // shuffle - Collections.shuffle(pings); + Collections.shuffle(pings, random()); ZenPing.PingCollection collection = new ZenPing.PingCollection(); collection.addPings(pings.toArray(new ZenPing.PingResponse[pings.size()])); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index d33aadd84aa..8dea09ba093 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -21,17 +21,14 @@ package 
org.elasticsearch.discovery.zen.publish; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -47,35 +44,20 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BytesTransportRequest; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportConnectionListener; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseOptions; -import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.*; import org.elasticsearch.transport.local.LocalTransport; import org.junit.After; import org.junit.Before; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.emptyIterable; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; @TestLogging("discovery.zen.publish:TRACE") public class PublishClusterStateActionTests extends ESTestCase { @@ -675,7 +657,7 @@ public class PublishClusterStateActionTests extends ESTestCase { logger.info("--> committing states"); - Collections.shuffle(states, random()); + Randomness.shuffle(states); for (ClusterState state : states) { node.action.handleCommitRequest(new PublishClusterStateAction.CommitClusterStateRequest(state.stateUUID()), channel); assertThat(channel.response.get(), equalTo((TransportResponse) TransportResponse.Empty.INSTANCE)); diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index d7f4c9176d0..441314b1e35 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -19,12 +19,7 @@ package org.elasticsearch.gateway; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.ChecksumIndexInput; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.MockDirectoryWrapper; -import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.*; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -33,11 +28,7 @@ import 
org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.*; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -50,20 +41,10 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import java.util.stream.StreamSupport; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.startsWith; +import static org.hamcrest.Matchers.*; @LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to work with ExtrasFS public class MetaDataStateFormatTests extends ESTestCase { @@ -349,7 +330,7 @@ public class MetaDataStateFormatTests extends ESTestCase { } List dirList = Arrays.asList(dirs); - Collections.shuffle(dirList, getRandom()); + Collections.shuffle(dirList, random()); MetaData loadedMetaData = format.loadLatestState(logger, dirList.toArray(new Path[0])); MetaData latestMetaData = meta.get(numStates-1); assertThat(loadedMetaData.clusterUUID(), not(equalTo("_na_"))); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java 
b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java index bfb7ead734d..bbba3432b66 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java @@ -49,21 +49,12 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.hamcrest.Matchers; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; public class SimpleAllMapperTests extends ESSingleNodeTestCase { @@ -251,7 +242,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { if (randomBoolean()) { booleanOptionList.add(new Tuple<>("store_term_vector_payloads", tv_payloads = randomBoolean())); } - Collections.shuffle(booleanOptionList, getRandom()); + Collections.shuffle(booleanOptionList, random()); for (Tuple option : booleanOptionList) { mappingBuilder.field(option.v1(), option.v2().booleanValue()); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java index b1224d5c6c7..ba9303e8b58 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java @@ -19,11 +19,7 @@ package org.elasticsearch.index.mapper.core; -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.CannedTokenStream; -import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.analysis.Token; -import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.*; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; @@ -85,7 +81,7 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase { t2.setPositionIncrement(2); // Count funny tokens with more than one increment int finalTokenIncrement = 4; // Count the final token increment on the rare token streams that have them Token[] tokens = new Token[] {t1, t2, t3}; - Collections.shuffle(Arrays.asList(tokens), getRandom()); + Collections.shuffle(Arrays.asList(tokens), random()); final TokenStream tokenStream = new CannedTokenStream(finalTokenIncrement, 0, tokens); // TODO: we have no CannedAnalyzer? 
Analyzer analyzer = new Analyzer() { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java index a5a073d147f..58fa8fd69b0 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java @@ -479,7 +479,7 @@ public class MultiFieldTests extends ESSingleNodeTestCase { .startObject("my_field").field("type", "string").startObject("fields").startObject(MY_MULTI_FIELD) .field("type", "string").startObject("fielddata"); String[] keys = possibleSettings.keySet().toArray(new String[]{}); - Collections.shuffle(Arrays.asList(keys)); + Collections.shuffle(Arrays.asList(keys), random()); for(int i = randomIntBetween(0, possibleSettings.size()-1); i >= 0; --i) builder.field(keys[i], possibleSettings.get(keys[i])); builder.endObject().endObject().endObject().endObject().endObject().endObject().endObject(); diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 4858e0a6e3c..53d18eaef81 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -19,26 +19,21 @@ package org.elasticsearch.index.store; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexFileNames; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import 
org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Nullable; @@ -48,7 +43,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.discovery.Discovery; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.shard.*; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; @@ -75,14 +69,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeSet; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.ExecutionException; @@ -91,16 +78,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; +import static org.hamcrest.Matchers.*; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) public class CorruptedFileIT extends ESIntegTestCase { @@ -320,7 +299,7 @@ public class CorruptedFileIT extends ESIntegTestCase { } assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); - Collections.shuffle(dataNodeStats, getRandom()); + Collections.shuffle(dataNodeStats, random()); NodeStats primariesNode = dataNodeStats.get(0); NodeStats unluckyNode = dataNodeStats.get(1); assertAcked(prepareCreate("test").setSettings(Settings.builder() @@ -380,7 +359,7 @@ public class CorruptedFileIT extends ESIntegTestCase { } assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); - Collections.shuffle(dataNodeStats, getRandom()); + Collections.shuffle(dataNodeStats, random()); NodeStats primariesNode = dataNodeStats.get(0); NodeStats unluckyNode = dataNodeStats.get(1); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java index 
fea8f0fdf14..8b23354ebb8 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java @@ -26,13 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.recovery.RecoveryState.File; -import org.elasticsearch.indices.recovery.RecoveryState.Index; -import org.elasticsearch.indices.recovery.RecoveryState.Stage; -import org.elasticsearch.indices.recovery.RecoveryState.Timer; -import org.elasticsearch.indices.recovery.RecoveryState.Translog; -import org.elasticsearch.indices.recovery.RecoveryState.Type; -import org.elasticsearch.indices.recovery.RecoveryState.VerifyIndex; +import org.elasticsearch.indices.recovery.RecoveryState.*; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -43,14 +37,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.test.VersionUtils.randomVersion; -import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.either; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.*; public class RecoveryStateTests extends ESTestCase { abstract class Streamer extends Thread { @@ -201,7 +188,7 @@ public class RecoveryStateTests extends ESTestCase { } } - Collections.shuffle(Arrays.asList(files)); + Collections.shuffle(Arrays.asList(files), random()); final RecoveryState.Index index = new RecoveryState.Index(); if 
(randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 4d111469505..60a14abac7c 100644 --- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.recovery.IndexRecoveryIT; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; @@ -84,7 +83,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { } } assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); - Collections.shuffle(dataNodeStats, getRandom()); + Collections.shuffle(dataNodeStats, random()); // we use 2 nodes a lucky and unlucky one // the lucky one holds the primary // the unlucky one gets the replica and the truncated leftovers diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 6f00e9977cf..0f5ac1a522f 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -942,13 +942,13 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { } public void assertSuggestions(String suggestion, String... 
suggestions) { - String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10); + String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10); CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestion).size(10); assertSuggestions(suggestionName, suggestionBuilder, suggestions); } public void assertSuggestionsNotInOrder(String suggestString, String... suggestions) { - String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10); + String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10); SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion( SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestString).size(10) ).execute().actionGet(); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java index 5a00bca9fac..a5c5f8aa2fe 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java @@ -48,58 +48,24 @@ import org.elasticsearch.script.groovy.GroovyPlugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.sort.FieldSortBuilder; -import org.elasticsearch.search.sort.GeoDistanceSortBuilder; -import org.elasticsearch.search.sort.ScriptSortBuilder; -import org.elasticsearch.search.sort.SortBuilders; -import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.search.sort.*; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matchers; import java.io.IOException; -import 
java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; +import java.util.*; import java.util.Map.Entry; -import java.util.Random; -import java.util.Set; -import java.util.TreeMap; import java.util.concurrent.ExecutionException; +import static org.apache.lucene.util.GeoUtils.TOLERANCE; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.index.query.QueryBuilders.*; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; import static org.elasticsearch.search.sort.SortBuilders.fieldSort; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSortValues; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; -import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; 
-import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -import static org.apache.lucene.util.GeoUtils.TOLERANCE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; +import static org.hamcrest.Matchers.*; /** * @@ -530,7 +496,7 @@ public class SimpleSortTests extends ESIntegTestCase { .endObject()); builders.add(builder); } - Collections.shuffle(builders, random); + Randomness.shuffle(builders); for (IndexRequestBuilder builder : builders) { builder.execute().actionGet(); if (random.nextBoolean()) { diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java index ec1ffd54a77..b6306e6209c 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java @@ -32,6 +32,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cloud.aws.network.Ec2NameResolver; import org.elasticsearch.cloud.aws.node.Ec2CustomNodeAttributes; import org.elasticsearch.cluster.node.DiscoveryNodeService; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkService; @@ -119,7 +120,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent } // Increase the number of retries in case of 5xx API responses - final Random rand = new Random(); + final Random rand = Randomness.get(); RetryPolicy retryPolicy = new RetryPolicy( RetryPolicy.RetryCondition.NO_RETRY_CONDITION, new RetryPolicy.BackoffStrategy() { diff 
--git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index 66c3d7959f8..01aaad1ca9d 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoveryService; import org.elasticsearch.node.Node; import org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.test.ESIntegTestCase; @@ -57,8 +58,20 @@ public class TribeUnitTests extends ESTestCase { .put("node.mode", NODE_MODE) .put("path.home", createTempDir()).build(); - tribe1 = new TribeClientNode(Settings.builder().put(baseSettings).put("cluster.name", "tribe1").put("name", "tribe1_node").build()).start(); - tribe2 = new TribeClientNode(Settings.builder().put(baseSettings).put("cluster.name", "tribe2").put("name", "tribe2_node").build()).start(); + tribe1 = new TribeClientNode( + Settings.builder() + .put(baseSettings) + .put("cluster.name", "tribe1") + .put("node.name", "tribe1_node") + .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong()) + .build()).start(); + tribe2 = new TribeClientNode( + Settings.builder() + .put(baseSettings) + .put("cluster.name", "tribe2") + .put("node.name", "tribe2_node") + .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong()) + .build()).start(); } @AfterClass @@ -73,6 +86,8 @@ public class TribeUnitTests extends ESTestCase { System.setProperty("es.cluster.name", "tribe_node_cluster"); System.setProperty("es.tribe.t1.cluster.name", "tribe1"); System.setProperty("es.tribe.t2.cluster.name", "tribe2"); + System.setProperty("es.tribe.t1.discovery.id.seed", 
Long.toString(random().nextLong())); + System.setProperty("es.tribe.t2.discovery.id.seed", Long.toString(random().nextLong())); try { assertTribeNodeSuccesfullyCreated(Settings.EMPTY); @@ -80,6 +95,8 @@ public class TribeUnitTests extends ESTestCase { System.clearProperty("es.cluster.name"); System.clearProperty("es.tribe.t1.cluster.name"); System.clearProperty("es.tribe.t2.cluster.name"); + System.clearProperty("es.tribe.t1.discovery.id.seed"); + System.clearProperty("es.tribe.t2.discovery.id.seed"); } } diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml b/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml index 89f4922a6af..ad1b9be8c89 100644 --- a/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml +++ b/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml @@ -1,3 +1,5 @@ cluster.name: tribe_node_cluster tribe.t1.cluster.name: tribe1 -tribe.t2.cluster.name: tribe2 \ No newline at end of file +tribe.t2.cluster.name: tribe2 +tribe.t1.discovery.id.seed: 1 +tribe.t2.discovery.id.seed: 2 diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java index c4f4b196739..e82823ae997 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import 
org.elasticsearch.common.transport.TransportAddress; @@ -111,7 +112,7 @@ public abstract class ESAllocationTestCase extends ESTestCase { for (AllocationDecider d : list) { assertThat(defaultAllocationDeciders.contains(d.getClass()), is(true)); } - Collections.shuffle(list, random); + Randomness.shuffle(list); return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0])); } diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 05dab1c35dc..fafff57c92f 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -533,7 +533,7 @@ public abstract class ESIntegTestCase extends ESTestCase { } private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception { - return RandomizedContext.current().runWithPrivateRandomness(new Randomness(seed), new Callable() { + return RandomizedContext.current().runWithPrivateRandomness(new com.carrotsearch.randomizedtesting.Randomness(seed), new Callable() { @Override public TestCluster call() throws Exception { return buildTestCluster(scope, seed); @@ -1388,7 +1388,7 @@ public abstract class ESIntegTestCase extends ESTestCase { } } final String[] indices = indicesSet.toArray(new String[indicesSet.size()]); - Collections.shuffle(builders, random); + Collections.shuffle(builders, random()); final CopyOnWriteArrayList> errors = new CopyOnWriteArrayList<>(); List inFlightAsyncOperations = new ArrayList<>(); // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk. 
diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java index c59c3ba4d4e..e1443110c0d 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -566,7 +566,7 @@ public abstract class ESTestCase extends LuceneTestCase { throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a list of " + values.length + " objects"); } List list = arrayAsArrayList(values); - Collections.shuffle(list); + Collections.shuffle(list, random()); return list.subList(0, size); } @@ -615,7 +615,7 @@ public abstract class ESTestCase extends LuceneTestCase { sb.append("]"); assertThat(count + " files exist that should have been cleaned:\n" + sb.toString(), count, equalTo(0)); } - + /** Returns the suite failure marker: internal use only! */ public static TestRuleMarkFailure getSuiteFailureMarker() { return suiteFailureMarker; From 47e922ba85a120a05ea9166a6fd1ce0f40a0cfb5 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 11 Dec 2015 08:26:00 -0800 Subject: [PATCH 028/167] Add excludes and patterns as explicit inputs to forbidden patterns task --- .../gradle/precommit/ForbiddenPatternsTask.groovy | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy index 5fa63956b57..6809adca946 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy @@ -61,11 +61,14 @@ public class ForbiddenPatternsTask extends DefaultTask { // add mandatory rules patterns.put('nocommit', /nocommit/) patterns.put('tab', /\t/) + + inputs.property("excludes", 
filesFilter.excludes) + inputs.property("rules", patterns) } /** Adds a file glob pattern to be excluded */ public void exclude(String... excludes) { - this.filesFilter.exclude(excludes) + filesFilter.exclude(excludes) } /** Adds a pattern to forbid. T */ From 11cbc08e45cb7afa5a236e72d68bf2404fd7728b Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 11 Dec 2015 11:46:50 -0500 Subject: [PATCH 029/167] Fix compilation in o.e.m.t.SimpleSortTests --- .../java/org/elasticsearch/messy/tests/SimpleSortTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java index a5c5f8aa2fe..db8a13c5ab8 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java @@ -469,7 +469,7 @@ public class SimpleSortTests extends ESIntegTestCase { } public void testSimpleSorts() throws Exception { - Random random = getRandom(); + Random random = random(); assertAcked(prepareCreate("test") .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("str_value").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject() @@ -496,7 +496,7 @@ public class SimpleSortTests extends ESIntegTestCase { .endObject()); builders.add(builder); } - Randomness.shuffle(builders); + Collections.shuffle(builders, random); for (IndexRequestBuilder builder : builders) { builder.execute().actionGet(); if (random.nextBoolean()) { From 9fd843748e424cac728bb08eff1e71c9ca897eb4 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 11 Dec 2015 18:02:15 +0100 Subject: [PATCH 030/167] Remove the `format` option of the `_source` field. 
This option allows to force the xcontent type to use to store the `_source` document. The default is to use the same format as the input format. This commit makes this option ignored for 2.x indices and rejected for 3.0 indices. --- .../mapper/internal/SourceFieldMapper.java | 77 ++----------------- .../source/DefaultSourceMappingTests.java | 22 ++---- docs/reference/migration/migrate_3_0.asciidoc | 7 ++ 3 files changed, 21 insertions(+), 85 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index a844ab223aa..f9bcb31b406 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -24,14 +24,13 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -46,17 +45,13 @@ import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; -import java.io.BufferedInputStream; import java.io.IOException; -import java.io.InputStream; import java.util.Arrays; import 
java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; -import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; /** * @@ -70,8 +65,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { public static class Defaults { public static final String NAME = SourceFieldMapper.NAME; public static final boolean ENABLED = true; - public static final long COMPRESS_THRESHOLD = -1; - public static final String FORMAT = null; // default format is to use the one provided public static final MappedFieldType FIELD_TYPE = new SourceFieldType(); @@ -91,8 +84,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { private boolean enabled = Defaults.ENABLED; - private String format = Defaults.FORMAT; - private String[] includes = null; private String[] excludes = null; @@ -105,11 +96,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { return this; } - public Builder format(String format) { - this.format = format; - return this; - } - public Builder includes(String[] includes) { this.includes = includes; return this; @@ -122,7 +108,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { @Override public SourceFieldMapper build(BuilderContext context) { - return new SourceFieldMapper(enabled, format, includes, excludes, context.indexSettings()); + return new SourceFieldMapper(enabled, includes, excludes, context.indexSettings()); } } @@ -138,8 +124,8 @@ public class SourceFieldMapper extends MetadataFieldMapper { if (fieldName.equals("enabled")) { builder.enabled(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if ("format".equals(fieldName)) { - builder.format(nodeStringValue(fieldNode, null)); + } else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_3_0_0)) { + // ignore on old indices, reject on and after 3.0 iterator.remove(); } 
else if (fieldName.equals("includes")) { List values = (List) fieldNode; @@ -213,22 +199,15 @@ public class SourceFieldMapper extends MetadataFieldMapper { private final String[] includes; private final String[] excludes; - private String format; - - private XContentType formatContentType; - private SourceFieldMapper(Settings indexSettings) { - this(Defaults.ENABLED, Defaults.FORMAT, null, null, indexSettings); + this(Defaults.ENABLED, null, null, indexSettings); } - private SourceFieldMapper(boolean enabled, String format, - String[] includes, String[] excludes, Settings indexSettings) { + private SourceFieldMapper(boolean enabled, String[] includes, String[] excludes, Settings indexSettings) { super(NAME, Defaults.FIELD_TYPE.clone(), Defaults.FIELD_TYPE, indexSettings); // Only stored. this.enabled = enabled; this.includes = includes; this.excludes = excludes; - this.format = format; - this.formatContentType = format == null ? null : XContentType.fromRestContentType(format); this.complete = enabled && includes == null && excludes == null; } @@ -284,50 +263,11 @@ public class SourceFieldMapper extends MetadataFieldMapper { Tuple> mapTuple = XContentHelper.convertToMap(source, true); Map filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes); BytesStreamOutput bStream = new BytesStreamOutput(); - XContentType contentType = formatContentType; - if (contentType == null) { - contentType = mapTuple.v1(); - } + XContentType contentType = mapTuple.v1(); XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource); builder.close(); source = bStream.bytes(); - } else if (formatContentType != null) { - // see if we need to convert the content type - Compressor compressor = CompressorFactory.compressor(source); - if (compressor != null) { - InputStream compressedStreamInput = compressor.streamInput(source.streamInput()); - if (compressedStreamInput.markSupported() == false) { - compressedStreamInput = new 
BufferedInputStream(compressedStreamInput); - } - XContentType contentType = XContentFactory.xContentType(compressedStreamInput); - if (contentType != formatContentType) { - // we need to reread and store back, compressed.... - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, streamOutput); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(compressedStreamInput)); - builder.close(); - source = bStream.bytes(); - // update the data in the context, so we store it in the translog in this format - context.source(source); - } else { - compressedStreamInput.close(); - } - } else { - XContentType contentType = XContentFactory.xContentType(source); - if (contentType != formatContentType) { - // we need to reread and store back - // we need to reread and store back, compressed.... - BytesStreamOutput bStream = new BytesStreamOutput(); - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, bStream); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source)); - builder.close(); - source = bStream.bytes(); - // update the data in the context, so we store it in the translog in this format - context.source(source); - } - } } if (!source.hasArray()) { source = source.toBytesArray(); @@ -352,9 +292,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { if (includeDefaults || enabled != Defaults.ENABLED) { builder.field("enabled", enabled); } - if (includeDefaults || !Objects.equals(format, Defaults.FORMAT)) { - builder.field("format", format); - } if (includes != null) { builder.field("includes", includes); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java index 
bbc1847dc08..364e9f2063f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.*; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; @@ -63,25 +64,16 @@ public class DefaultSourceMappingTests extends ESSingleNodeTestCase { assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.SMILE)); } - public void testJsonFormat() throws Exception { + public void testFormatBackCompat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_source").field("format", "json").endObject() .endObject().endObject().string(); + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_2_0)) + .build(); - DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - DocumentMapper documentMapper = parser.parse(mapping); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON)); - - documentMapper = parser.parse(mapping); - doc = documentMapper.parse("test", "type", "1", XContentFactory.smileBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON)); + DocumentMapperParser parser = createIndex("test", settings).mapperService().documentMapperParser(); + parser.parse(mapping); // no 
exception } public void testIncludes() throws Exception { diff --git a/docs/reference/migration/migrate_3_0.asciidoc b/docs/reference/migration/migrate_3_0.asciidoc index fcee4105160..6588f22a85a 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -213,6 +213,13 @@ float by default instead of a double. The reasoning is that floats should be more than enough for most cases but would decrease storage requirements significantly. +==== `_source`'s `format` option + +The `_source` mapping does not support the `format` option anymore. This option +will still be accepted for indices created before the upgrade to 3.0 for backward +compatibility, but it will have no effect. Indices created on or after 3.0 will +reject this option. + [[breaking_30_plugins]] === Plugin changes From a087d557f9920bac8f3d3309677416db292cd926 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 11 Dec 2015 12:10:43 -0500 Subject: [PATCH 031/167] Fix NullPointerException in o.e.c.Randomness --- core/src/main/java/org/elasticsearch/common/Randomness.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java index 770b6554cf6..cc0af86cbba 100644 --- a/core/src/main/java/org/elasticsearch/common/Randomness.java +++ b/core/src/main/java/org/elasticsearch/common/Randomness.java @@ -114,7 +114,7 @@ public final class Randomness { private static Random getWithoutSeed() { assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; - if (LOCAL.get() == null) { + if (LOCAL == null) { byte[] bytes = SR.generateSeed(8); long accumulator = 0; for (int i = 0; i < bytes.length; i++) { From 0f51d81410e2e941518caa8a9f077c7250062be1 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 11 Dec 2015 20:00:38 +0100 Subject: [PATCH 032/167] Fix settings for 
TribeUnitTests. --- .../src/test/java/org/elasticsearch/tribe/TribeUnitTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index 01aaad1ca9d..1ad972e10ef 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -62,14 +62,14 @@ public class TribeUnitTests extends ESTestCase { Settings.builder() .put(baseSettings) .put("cluster.name", "tribe1") - .put("node.name", "tribe1_node") + .put("name", "tribe1_node") .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong()) .build()).start(); tribe2 = new TribeClientNode( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe2") - .put("node.name", "tribe2_node") + .put("name", "tribe2_node") .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong()) .build()).start(); } From 9ab168dbf6b7069022b9a1a2ae6df6d2de42a815 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 11 Dec 2015 20:07:57 +0100 Subject: [PATCH 033/167] Removes all the reference of the query in the docs --- docs/java-api/query-dsl/term-level-queries.asciidoc | 10 ---------- .../mapping/fields/field-names-field.asciidoc | 8 +++----- docs/reference/mapping/params/null-value.asciidoc | 4 ---- docs/reference/query-dsl/term-level-queries.asciidoc | 10 ---------- docs/reference/redirects.asciidoc | 9 --------- 5 files changed, 3 insertions(+), 38 deletions(-) diff --git a/docs/java-api/query-dsl/term-level-queries.asciidoc b/docs/java-api/query-dsl/term-level-queries.asciidoc index 44fc3639072..e7d5ad4e52b 100644 --- a/docs/java-api/query-dsl/term-level-queries.asciidoc +++ b/docs/java-api/query-dsl/term-level-queries.asciidoc @@ -30,11 +30,6 @@ The queries in this group are: Find documents where the field specified contains any non-null value. 
-<>:: - - Find documents where the field specified does is missing or contains only - `null` values. - <>:: Find documents where the field specified contains terms which being with @@ -75,8 +70,6 @@ include::range-query.asciidoc[] include::exists-query.asciidoc[] -include::missing-query.asciidoc[] - include::prefix-query.asciidoc[] include::wildcard-query.asciidoc[] @@ -88,6 +81,3 @@ include::fuzzy-query.asciidoc[] include::type-query.asciidoc[] include::ids-query.asciidoc[] - - - diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index 2c40f72bbea..bafc3e3f7d9 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -3,9 +3,8 @@ The `_field_names` field indexes the names of every field in a document that contains any value other than `null`. This field is used by the -<> and <> -queries to find documents that either have or don't have any non-+null+ value -for a particular field. +<> query to find documents that +either have or don't have any non-+null+ value for a particular field. The value of the `_field_name` field is accessible in queries, aggregations, and scripts: @@ -49,7 +48,6 @@ GET my_index/_search -------------------------- // AUTOSENSE -<1> Querying on the `_field_names` field (also see the <> and <> queries) +<1> Querying on the `_field_names` field (also see the <> query) <2> Aggregating on the `_field_names` field <3> Accessing the `_field_names` field in scripts (inline scripts must be <> for this example to work) - diff --git a/docs/reference/mapping/params/null-value.asciidoc b/docs/reference/mapping/params/null-value.asciidoc index 552ce66ded8..4d70d4a6ac5 100644 --- a/docs/reference/mapping/params/null-value.asciidoc +++ b/docs/reference/mapping/params/null-value.asciidoc @@ -53,7 +53,3 @@ IMPORTANT: The `null_value` needs to be the same datatype as the field. 
For instance, a `long` field cannot have a string `null_value`. String fields which are `analyzed` will also pass the `null_value` through the configured analyzer. - -Also see the <> for its `null_value` support. - - diff --git a/docs/reference/query-dsl/term-level-queries.asciidoc b/docs/reference/query-dsl/term-level-queries.asciidoc index 7e9f5e5ca3e..9c28a727b33 100644 --- a/docs/reference/query-dsl/term-level-queries.asciidoc +++ b/docs/reference/query-dsl/term-level-queries.asciidoc @@ -30,11 +30,6 @@ The queries in this group are: Find documents where the field specified contains any non-null value. -<>:: - - Find documents where the field specified does is missing or contains only - `null` values. - <>:: Find documents where the field specified contains terms which being with @@ -75,8 +70,6 @@ include::range-query.asciidoc[] include::exists-query.asciidoc[] -include::missing-query.asciidoc[] - include::prefix-query.asciidoc[] include::wildcard-query.asciidoc[] @@ -88,6 +81,3 @@ include::fuzzy-query.asciidoc[] include::type-query.asciidoc[] include::ids-query.asciidoc[] - - - diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 6de6699984e..823bdb70d07 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -96,14 +96,6 @@ The `exists` filter has been replaced by the <>. It beh as a query in ``query context'' and as a filter in ``filter context'' (see <>). -[role="exclude",id="query-dsl-missing-filter"] -=== Missing Filter - -The `missing` filter has been replaced by the <>. It behaves -as a query in ``query context'' and as a filter in ``filter context'' (see -<>). - - [role="exclude",id="query-dsl-geo-bounding-box-filter"] === Geo Bounding Box Filter @@ -451,4 +443,3 @@ The `not` query has been replaced by using a `mustNot` clause in a Boolean query === Nested type The docs for the `nested` field datatype have moved to <>. 
- From e7127c9f6fdb128ad66231d476c8a5a84182217e Mon Sep 17 00:00:00 2001 From: Andrew Williams Date: Fri, 11 Dec 2015 14:03:54 -0600 Subject: [PATCH 034/167] Fix docs with `match_pattern` in dynamic templates --- docs/reference/mapping/dynamic/templates.asciidoc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index e38fc31cb37..b60c5f0510e 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -148,13 +148,14 @@ PUT my_index/my_type/1 [[match-pattern]] ==== `match_pattern` -The `match_pattern` parameter behaves just like the `match` parameter, but -supports full Java regular expression matching on the field name instead of -simple wildcards, for instance: +The `match_pattern` parameter adjusts the behavior of the `match` parameter +such that it supports full Java regular expression matching on the field name +instead of simple wildcards, for instance: [source,js] -------------------------------------------------- - "match_pattern": "^profit_\d+$" + "match_pattern": "regex", + "match": "^profit_\d+$" -------------------------------------------------- [[path-match-unmatch]] From 5f3d807f6127fcaad9923e25923f402b0557487f Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Wed, 2 Dec 2015 10:09:00 -0600 Subject: [PATCH 035/167] Update geo_shape/query docs, fix TermStrategy defaults This commit adds the following: * SpatialStrategy documentation to the geo-shape reference docs. * Updates relation documentation to geo-shape-query reference docs. 
* Updates GeoShapeFiledMapper to set points_only to true if TERM strategy is used (to be consistent with documentation) --- .../index/mapper/geo/GeoShapeFieldMapper.java | 10 ++- .../mapping/types/geo-shape.asciidoc | 64 +++++++++++++++---- .../query-dsl/geo-shape-query.asciidoc | 5 +- 3 files changed, 64 insertions(+), 15 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index 7e784324f36..71b6d89610f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -193,7 +193,8 @@ public class GeoShapeFieldMapper extends FieldMapper { } else if (Names.COERCE.equals(fieldName)) { builder.coerce(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)) { + } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName) + && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) { builder.fieldType().setPointsOnly(XContentMapValues.nodeBooleanValue(fieldNode)); iterator.remove(); } @@ -284,6 +285,7 @@ public class GeoShapeFieldMapper extends FieldMapper { termStrategy = new TermQueryPrefixTreeStrategy(prefixTree, names().indexName()); termStrategy.setDistErrPct(distanceErrorPct()); defaultStrategy = resolveStrategy(strategyName); + defaultStrategy.setPointsOnly(pointsOnly); } @Override @@ -347,6 +349,9 @@ public class GeoShapeFieldMapper extends FieldMapper { public void setStrategyName(String strategyName) { checkIfFrozen(); this.strategyName = strategyName; + if (this.strategyName.equals(SpatialStrategy.TERM)) { + this.pointsOnly = true; + } } public boolean pointsOnly() { @@ -406,7 +411,6 @@ public class GeoShapeFieldMapper extends FieldMapper { public PrefixTreeStrategy resolveStrategy(String strategyName) { if 
(SpatialStrategy.RECURSIVE.getStrategyName().equals(strategyName)) { - recursiveStrategy.setPointsOnly(pointsOnly()); return recursiveStrategy; } if (SpatialStrategy.TERM.getStrategyName().equals(strategyName)) { @@ -446,7 +450,7 @@ public class GeoShapeFieldMapper extends FieldMapper { } shape = shapeBuilder.build(); } - if (fieldType().defaultStrategy() instanceof RecursivePrefixTreeStrategy && fieldType().pointsOnly() && !(shape instanceof Point)) { + if (fieldType().pointsOnly() && !(shape instanceof Point)) { throw new MapperParsingException("[{" + fieldType().names().fullName() + "}] is configured for points only but a " + ((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found"); } diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index d974847a98a..1f0c76e1b93 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -17,13 +17,13 @@ The geo_shape mapping maps geo_json geometry objects to the geo_shape type. To enable it, users must explicitly map fields to the geo_shape type. -[cols="<,<",options="header",] +[cols="<,<,<",options="header",] |======================================================================= -|Option |Description +|Option |Description| Default |`tree` |Name of the PrefixTree implementation to be used: `geohash` for -GeohashPrefixTree and `quadtree` for QuadPrefixTree. Defaults to -`geohash`. +GeohashPrefixTree and `quadtree` for QuadPrefixTree. +| `geohash` |`precision` |This parameter may be used instead of `tree_levels` to set an appropriate value for the `tree_levels` parameter. The value @@ -31,7 +31,8 @@ specifies the desired precision and Elasticsearch will calculate the best tree_levels value to honor this precision. The value should be a number followed by an optional distance unit. 
Valid distance units include: `in`, `inch`, `yd`, `yard`, `mi`, `miles`, `km`, `kilometers`, -`m`,`meters` (default), `cm`,`centimeters`, `mm`, `millimeters`. +`m`,`meters`, `cm`,`centimeters`, `mm`, `millimeters`. +| `meters` |`tree_levels` |Maximum number of layers to be used by the PrefixTree. This can be used to control the precision of shape representations and @@ -41,27 +42,40 @@ certain level of understanding of the underlying implementation, users may use the `precision` parameter instead. However, Elasticsearch only uses the tree_levels parameter internally and this is what is returned via the mapping API even if you use the precision parameter. +| `50m` + +|`strategy` |The strategy parameter defines the approach for how to +represent shapes at indexing and search time. It also influences the +capabilities available so it is recommended to let Elasticsearch set +this parameter automatically. There are two strategies available: +`recursive` and `term`. Term strategy supports point types only (the +`points_only` parameter will be automatically set to true) while +Recursive strategy supports all shape types. (IMPORTANT: see +<> for more detailed information) +| `recursive` |`distance_error_pct` |Used as a hint to the PrefixTree about how precise it should be. Defaults to 0.025 (2.5%) with 0.5 as the maximum -supported value. PERFORMANCE NOTE: This value will be default to 0 if a `precision` or +supported value. PERFORMANCE NOTE: This value will default to 0 if a `precision` or `tree_level` definition is explicitly defined. This guarantees spatial precision at the level defined in the mapping. This can lead to significant memory usage for high resolution shapes with low error (e.g., large shapes at 1m with < 0.001 error). To improve indexing performance (at the cost of query accuracy) explicitly define `tree_level` or `precision` along with a reasonable `distance_error_pct`, noting that large shapes will have greater false positives. 
+| `0.025` |`orientation` |Optionally define how to interpret vertex order for polygons / multipolygons. This parameter defines one of two coordinate system rules (Right-hand or Left-hand) each of which can be specified in three -different ways. 1. Right-hand rule (default): `right`, `ccw`, `counterclockwise`, +different ways. 1. Right-hand rule: `right`, `ccw`, `counterclockwise`, 2. Left-hand rule: `left`, `cw`, `clockwise`. The default orientation (`counterclockwise`) complies with the OGC standard which defines outer ring vertices in counterclockwise order with inner ring(s) vertices (holes) in clockwise order. Setting this parameter in the geo_shape mapping explicitly sets vertex order for the coordinate list of a geo_shape field but can be overridden in each individual GeoJSON document. +| `ccw` |`points_only` |Setting this option to `true` (defaults to `false`) configures the `geo_shape` field type for point shapes only (NOTE: Multi-Points are not @@ -70,18 +84,21 @@ yet supported). This optimizes index and search performance for the `geohash` an queries can not be executed on `geo_point` field types. This option bridges the gap by improving point performance on a `geo_shape` field so that `geo_shape` queries are optimal on a point only field. +| `false` |======================================================================= +[[prefix-trees]] [float] ==== Prefix trees To efficiently represent shapes in the index, Shapes are converted into -a series of hashes representing grid squares using implementations of a -PrefixTree. The tree notion comes from the fact that the PrefixTree uses -multiple grid layers, each with an increasing level of precision to -represent the Earth. +a series of hashes representing grid squares (commonly referred to as "rasters") +using implementations of a PrefixTree. The tree notion comes from the fact that +the PrefixTree uses multiple grid layers, each with an increasing level of +precision to represent the Earth. 
This can be thought of as increasing the level +of detail of a map or image at higher zoom levels. Multiple PrefixTree implementations are provided: @@ -100,6 +117,29 @@ longitude the resulting hash is a bit set. A tree level in a quad tree represents 2 bits in this bit set, one for each coordinate. The maximum amount of levels for the quad trees in Elasticsearch is 50. +[[spatial-strategy]] +[float] +===== Spatial strategies +The PrefixTree implementations rely on a SpatialStrategy for decomposing +the provided Shape(s) into approximated grid squares. Each strategy answers +the following: + +* What type of Shapes can be indexed? +* What types of Query Operations and Shapes can be used? +* Does it support more than one Shape per field? + +The following Strategy implementations (with corresponding capabilities) +are provided: + +[cols="<,<,<,<",options="header",] +|======================================================================= +|Strategy |Supported Shapes |Supported Queries |Multiple Shapes + +|`recursive` |<> |`INTERSECTS`, `DISJOINT`, `WITHIN`, `CONTAINS` |Yes +|`term` |<> |`INTERSECTS` |Yes + +|======================================================================= + [float] ===== Accuracy @@ -149,6 +189,7 @@ between index size and a reasonable level of precision of 50m at the equator. This allows for indexing tens of millions of shapes without overly bloating the resulting index too much relative to the input size. +[[input-structure]] [float] ==== Input Structure @@ -189,6 +230,7 @@ differs from many Geospatial APIs (e.g., Google Maps) that generally use the colloquial latitude, longitude (Y, X). 
============================================= +[[point]] [float] ===== http://geojson.org/geojson-spec.html#id2[Point] diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index a6d13562bb0..d389380b781 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -104,7 +104,10 @@ shape: ==== Spatial Relations -The Query supports the following spatial relations: +The <> mapping parameter determines +which spatial relation operators may be used at search time. + +The following is a complete list of spatial relation operators available: * `INTERSECTS` - (default) Return all documents whose `geo_shape` field intersects the query geometry. From a42823ad459eef6d07ea005d9b63074ad79ca56c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 11 Dec 2015 19:03:16 -0500 Subject: [PATCH 036/167] Improve ThreadLocal handling in o.e.c.Randomness This commit improves the handling of ThreadLocal Random instance allocation in o.e.c.Randomness. - the seed per instance is no longer fixed - a non-dangerous race to create the ThreadLocal instance has been removed - encapsulated all state into an static nested class for safe and lazy instantiation --- .../org/elasticsearch/common/Randomness.java | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java index cc0af86cbba..e912a9977f1 100644 --- a/core/src/main/java/org/elasticsearch/common/Randomness.java +++ b/core/src/main/java/org/elasticsearch/common/Randomness.java @@ -43,7 +43,6 @@ import java.util.Random; * DiscoveryService#SETTING_DISCOVERY_SEED)). 
*/ public final class Randomness { - private static final SecureRandom SR = new SecureRandom(); private static final Method currentMethod; private static final Method getRandomMethod; @@ -110,23 +109,27 @@ public final class Randomness { } } - private static ThreadLocal LOCAL; - private static Random getWithoutSeed() { assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; - if (LOCAL == null) { - byte[] bytes = SR.generateSeed(8); - long accumulator = 0; - for (int i = 0; i < bytes.length; i++) { - accumulator = (accumulator << 8) + bytes[i] & 0xFFL; - } - final long seed = accumulator; - LOCAL = ThreadLocal.withInitial(() -> new Random(seed)); - } - return LOCAL.get(); + return RandomnessHelper.LOCAL.get(); } public static void shuffle(List list) { Collections.shuffle(list, get()); } + + private static class RandomnessHelper { + private static final SecureRandom SR = new SecureRandom(); + + private static final ThreadLocal LOCAL = ThreadLocal.withInitial( + () -> { + byte[] bytes = SR.generateSeed(8); + long accumulator = 0; + for (int i = 0; i < bytes.length; i++) { + accumulator = (accumulator << 8) + bytes[i] & 0xFFL; + } + return new Random(accumulator); + } + ); + } } From 708abcc59ab14a037cc71934daf0363fc18c0c67 Mon Sep 17 00:00:00 2001 From: Felipe Forbeck Date: Fri, 11 Dec 2015 22:26:33 -0200 Subject: [PATCH 037/167] Added desc for parameter --- docs/reference/cluster/health.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index 7d9bdc1b041..137b4ac48cd 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -87,6 +87,10 @@ The cluster health API accepts the following request parameters: A time based parameter controlling how long to wait if one of the wait_for_XXX are provided. Defaults to `30s`. 
+`local`:: + If `true` returns the local node information and does not provide + the state from master node. Default: `false`. + The following is an example of getting the cluster health at the `shards` level: From 5c8a0da1fd12f16ab0cb26f479465059ada68f87 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 11 Dec 2015 17:36:08 -0800 Subject: [PATCH 038/167] Build: Change rest integ tests to not have hardcoded ports This change removes hardcoded ports from cluster formation. It passes port 0 for http and transport, and then uses a special property to have the node log the ports used for http and transport (just for tests). This does not yet work for multi node tests. This brings us one step closer to working with --parallel. --- .../junit4/RandomizedTestingTask.groovy | 8 ++-- .../gradle/test/ClusterConfiguration.groovy | 6 +-- .../gradle/test/ClusterFormationTasks.groovy | 43 ++++++++++++----- .../elasticsearch/gradle/test/NodeInfo.groovy | 23 +++++++--- .../gradle/test/RestIntegTestTask.groovy | 4 +- .../elasticsearch/gradle/test/RunTask.groovy | 2 +- .../java/org/elasticsearch/node/Node.java | 46 +++++++++++++++++++ .../transport/netty/NettyTransport.java | 6 +++ .../smoketest/ESSmokeClientTestCase.java | 20 ++++---- .../elasticsearch/test/ESIntegTestCase.java | 17 +++---- 10 files changed, 126 insertions(+), 49 deletions(-) diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy index 33e16feb44a..ccb5d5904bf 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy @@ -78,7 +78,7 @@ class RandomizedTestingTask extends DefaultTask { @Input String argLine = null - Map systemProperties = new HashMap<>() + Map systemProperties = new HashMap<>() PatternFilterable patternSet = new PatternSet() 
RandomizedTestingTask() { @@ -100,7 +100,7 @@ class RandomizedTestingTask extends DefaultTask { jvmArgs.add(argument) } - void systemProperty(String property, String value) { + void systemProperty(String property, Object value) { systemProperties.put(property, value) } @@ -245,8 +245,8 @@ class RandomizedTestingTask extends DefaultTask { exclude(name: excludePattern) } } - for (Map.Entry prop : systemProperties) { - sysproperty key: prop.getKey(), value: prop.getValue() + for (Map.Entry prop : systemProperties) { + sysproperty key: prop.getKey(), value: prop.getValue().toString() } makeListeners() } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 8bc80da74b5..fa23299cee4 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -33,10 +33,10 @@ class ClusterConfiguration { int numNodes = 1 @Input - int baseHttpPort = 9400 + int httpPort = 0 @Input - int baseTransportPort = 9500 + int transportPort = 0 @Input boolean daemonize = true @@ -55,7 +55,7 @@ class ClusterConfiguration { @Input Closure waitCondition = { NodeInfo node, AntBuilder ant -> File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://localhost:${node.httpPort()}", + ant.get(src: "http://${node.httpUri()}", dest: tmpFile.toString(), ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task retries: 10) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index e62175b743e..08976dbdb39 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ 
b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -38,8 +38,10 @@ class ClusterFormationTasks { /** * Adds dependent tasks to the given task to start and stop a cluster with the given configuration. + * + * Returns an object that will resolve at execution time of the given task to a uri for the cluster. */ - static void setup(Project project, Task task, ClusterConfiguration config) { + static Object setup(Project project, Task task, ClusterConfiguration config) { if (task.getEnabled() == false) { // no need to add cluster formation tasks if the task won't run! return @@ -55,6 +57,9 @@ class ClusterFormationTasks { Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks) task.dependsOn(wait) + + // delay the resolution of the uri by wrapping in a closure, so it is not used until read for tests + return "${-> nodes[0].transportUri()}" } /** Adds a dependency on the given distribution */ @@ -200,17 +205,24 @@ class ClusterFormationTasks { /** Adds a task to write elasticsearch.yml for the given node configuration */ static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node) { Map esConfig = [ - 'cluster.name' : node.clusterName, - 'http.port' : node.httpPort(), - 'transport.tcp.port' : node.transportPort(), - 'pidfile' : node.pidFile, - 'discovery.zen.ping.unicast.hosts': (0.. 
logger.error("| ${line}") } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 337eea3de97..b369d35c03a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -43,6 +43,12 @@ class NodeInfo { /** the pid file the node will use */ File pidFile + /** a file written by elasticsearch containing the ports of each bound address for http */ + File httpPortsFile + + /** a file written by elasticsearch containing the ports of each bound address for transport */ + File transportPortsFile + /** elasticsearch home dir */ File homeDir @@ -92,6 +98,10 @@ class NodeInfo { homeDir = homeDir(baseDir, config.distribution) confDir = confDir(baseDir, config.distribution) configFile = new File(confDir, 'elasticsearch.yml') + // even for rpm/deb, the logs are under home because we dont start with real services + File logsDir = new File(homeDir, 'logs') + httpPortsFile = new File(logsDir, 'http.ports') + transportPortsFile = new File(logsDir, 'transport.ports') cwd = new File(baseDir, "cwd") failedMarker = new File(cwd, 'run.failed') startLog = new File(cwd, 'run.log') @@ -119,6 +129,7 @@ class NodeInfo { 'JAVA_HOME' : project.javaHome, 'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc ] + args.add("-Des.tests.portsfile=true") args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" }) for (Map.Entry property : System.properties.entrySet()) { if (property.getKey().startsWith('es.')) { @@ -159,14 +170,14 @@ class NodeInfo { wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8') } - /** Returns the http port for this node */ - int httpPort() { - return config.baseHttpPort + nodeNum + /** Returns an address and port suitable for a uri to connect to this node 
over http */ + String httpUri() { + return httpPortsFile.readLines("UTF-8").get(0) } - /** Returns the transport port for this node */ - int transportPort() { - return config.baseTransportPort + nodeNum + /** Returns an address and port suitable for a uri to connect to this node over transport protocol */ + String transportUri() { + return transportPortsFile.readLines("UTF-8").get(0) } /** Returns the directory elasticsearch home is contained in for the given distribution */ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index cd43cd2ca67..24bd57a3a59 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -57,12 +57,12 @@ public class RestIntegTestTask extends RandomizedTestingTask { RestSpecHack.configureDependencies(project) project.afterEvaluate { dependsOn(RestSpecHack.configureTask(project, includePackaged)) - systemProperty('tests.cluster', "localhost:${clusterConfig.baseTransportPort}") } // this must run after all projects have been configured, so we know any project // references can be accessed as a fully configured project.gradle.projectsEvaluated { - ClusterFormationTasks.setup(project, this, clusterConfig) + Object clusterUri = ClusterFormationTasks.setup(project, this, clusterConfig) + systemProperty('tests.cluster', clusterUri) } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy index 32469218a5b..842ef8c35cd 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy @@ -8,7 +8,7 @@ import org.gradle.util.ConfigureUtil public class RunTask extends DefaultTask { - ClusterConfiguration clusterConfig 
= new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false) + ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false) public RunTask() { description = "Runs elasticsearch with '${project.path}'" diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index f4bc34a91e2..aac0b0cd9d9 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.common.StopWatch; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; @@ -42,11 +43,14 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryService; @@ -59,6 +63,7 @@ import org.elasticsearch.gateway.GatewayModule; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.http.HttpServer; 
import org.elasticsearch.http.HttpServerModule; +import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -97,7 +102,15 @@ import org.elasticsearch.tribe.TribeService; import org.elasticsearch.watcher.ResourceWatcherModule; import org.elasticsearch.watcher.ResourceWatcherService; +import java.io.BufferedWriter; import java.io.IOException; +import java.net.Inet6Address; +import java.net.InetSocketAddress; +import java.nio.charset.Charset; +import java.nio.file.CopyOption; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -274,6 +287,15 @@ public class Node implements Releasable { injector.getInstance(ResourceWatcherService.class).start(); injector.getInstance(TribeService.class).start(); + if (System.getProperty("es.tests.portsfile", "false").equals("true")) { + if (settings.getAsBoolean("http.enabled", true)) { + HttpServerTransport http = injector.getInstance(HttpServerTransport.class); + writePortsFile("http", http.boundAddress()); + } + TransportService transport = injector.getInstance(TransportService.class); + writePortsFile("transport", transport.boundAddress()); + } + logger.info("started"); return this; @@ -425,4 +447,28 @@ public class Node implements Releasable { public Injector injector() { return this.injector; } + + @SuppressForbidden(reason = "Need to specify port and pass to formatter") + private void writePortsFile(String type, BoundTransportAddress boundAddress) { + Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp"); + try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) { + for (TransportAddress address : boundAddress.boundAddresses()) { + InetSocketAddress socketAddress = new 
InetSocketAddress(address.getAddress(), address.getPort()); + if (socketAddress.getAddress() instanceof Inet6Address && + socketAddress.getAddress().isLinkLocalAddress()) { + // no link local, just causes problems + continue; + } + writer.write(NetworkAddress.formatAddress(socketAddress) + "\n"); + } + } catch (IOException e) { + throw new RuntimeException("Failed to write ports file", e); + } + Path portsFile = environment.logsFile().resolve(type + ".ports"); + try { + Files.move(tmpPortsFile, portsFile, StandardCopyOption.ATOMIC_MOVE); + } catch (IOException e) { + throw new RuntimeException("Failed to rename ports file", e); + } + } } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index ab39a35d224..2f1c52a0ac2 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -87,6 +87,7 @@ import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory; import org.jboss.netty.util.HashedWheelTimer; import java.io.IOException; +import java.net.BindException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.SocketAddress; @@ -763,6 +764,11 @@ public class NettyTransport extends AbstractLifecycleComponent implem // close the channel as safe measure, which will cause a node to be disconnected if relevant ctx.getChannel().close(); disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); + } else if (e.getCause() instanceof BindException) { + logger.trace("bind exception caught on transport layer [{}]", e.getCause(), ctx.getChannel()); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + ctx.getChannel().close(); + disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); } else if (e.getCause() instanceof CancelledKeyException) { logger.trace("cancelled key exception 
caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel()); // close the channel as safe measure, which will cause a node to be disconnected if relevant diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java index 6e912cfab22..45cba5871e0 100644 --- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java +++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java @@ -20,6 +20,7 @@ package org.elasticsearch.smoketest; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.SuppressForbidden; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; @@ -34,7 +35,10 @@ import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import java.io.IOException; import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.URL; import java.net.UnknownHostException; import java.nio.file.Path; import java.util.Locale; @@ -103,20 +107,14 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase { return client; } - private static Client startClient() throws UnknownHostException { + @SuppressForbidden(reason = "Must use port when creating address") + private static Client startClient() throws IOException { String[] stringAddresses = clusterAddresses.split(","); TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; int i = 0; for (String stringAddress : stringAddresses) { - String[] split = stringAddress.split(":"); - if (split.length < 2) { - throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid"); - } - try { - transportAddresses[i++] = new 
InetSocketTransportAddress(InetAddress.getByName(split[0]), Integer.valueOf(split[1])); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]"); - } + URL url = new URL("http://" + stringAddress); + transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(url.getHost(), url.getPort())); } return startClient(createTempDir(), transportAddresses); } @@ -125,7 +123,7 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase { if (client == null) { try { client = startClient(); - } catch (UnknownHostException e) { + } catch (IOException e) { logger.error("can not start the client", e); } assertThat(client, notNullValue()); diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index fafff57c92f..51d9a5c55ab 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -28,6 +28,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.http.impl.client.HttpClients; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.SuppressForbidden; import org.apache.lucene.util.TestUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -136,6 +137,8 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.MalformedURLException; +import java.net.URL; import java.net.UnknownHostException; import java.nio.file.DirectoryStream; import java.nio.file.Files; @@ -1727,20 +1730,14 @@ public abstract class ESIntegTestCase extends ESTestCase { return Settings.EMPTY; } - private ExternalTestCluster buildExternalCluster(String 
clusterAddresses) throws UnknownHostException { + @SuppressForbidden(reason = "Must use port when creating address") + private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException { String[] stringAddresses = clusterAddresses.split(","); TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; int i = 0; for (String stringAddress : stringAddresses) { - String[] split = stringAddress.split(":"); - if (split.length < 2) { - throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid"); - } - try { - transportAddresses[i++] = new InetSocketTransportAddress(InetAddress.getByName(split[0]), Integer.valueOf(split[1])); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]"); - } + URL url = new URL("http://" + stringAddress); + transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(url.getHost(), url.getPort())); } return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses); } From 7cbb955d19128987798e73b98e16b6ba10caaf72 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 11 Dec 2015 18:08:39 -0800 Subject: [PATCH 039/167] Move forbidden suppression for InetSocketAddress to its own method --- core/src/main/java/org/elasticsearch/node/Node.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index aac0b0cd9d9..5cdd32b3f97 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -448,12 +448,12 @@ public class Node implements Releasable { return this.injector; } - @SuppressForbidden(reason = "Need to specify port and pass to formatter") + /** Writes a file to the logs dir containing the ports for the given transport type 
*/ private void writePortsFile(String type, BoundTransportAddress boundAddress) { Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp"); try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) { for (TransportAddress address : boundAddress.boundAddresses()) { - InetSocketAddress socketAddress = new InetSocketAddress(address.getAddress(), address.getPort()); + InetSocketAddress socketAddress = buildAddress(address.getAddress(), address.getPort()); if (socketAddress.getAddress() instanceof Inet6Address && socketAddress.getAddress().isLinkLocalAddress()) { // no link local, just causes problems @@ -471,4 +471,9 @@ public class Node implements Releasable { throw new RuntimeException("Failed to rename ports file", e); } } + + @SuppressForbidden(reason = "Need to specify address and port") + private static InetSocketAddress buildAddress(String address, int port) { + return new InetSocketAddress(address, port); + } } From a0c69fe7f946c54e06225ae9b447f827c59baee0 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 11 Dec 2015 18:20:09 -0800 Subject: [PATCH 040/167] Remove forbidden suppressions for InetSocketAddress --- core/src/main/java/org/elasticsearch/node/Node.java | 13 ++++--------- .../smoketest/ESSmokeClientTestCase.java | 4 ++-- .../org/elasticsearch/test/ESIntegTestCase.java | 4 ++-- 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 5cdd32b3f97..d3f6367cac0 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -105,6 +105,7 @@ import org.elasticsearch.watcher.ResourceWatcherService; import java.io.BufferedWriter; import java.io.IOException; import java.net.Inet6Address; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.charset.Charset; import java.nio.file.CopyOption; @@ -453,13 
+454,12 @@ public class Node implements Releasable { Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp"); try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) { for (TransportAddress address : boundAddress.boundAddresses()) { - InetSocketAddress socketAddress = buildAddress(address.getAddress(), address.getPort()); - if (socketAddress.getAddress() instanceof Inet6Address && - socketAddress.getAddress().isLinkLocalAddress()) { + InetAddress inetAddress = InetAddress.getByName(address.getAddress()); + if (inetAddress instanceof Inet6Address && inetAddress.isLinkLocalAddress()) { // no link local, just causes problems continue; } - writer.write(NetworkAddress.formatAddress(socketAddress) + "\n"); + writer.write(NetworkAddress.formatAddress(new InetSocketAddress(inetAddress, address.getPort())) + "\n"); } } catch (IOException e) { throw new RuntimeException("Failed to write ports file", e); @@ -471,9 +471,4 @@ public class Node implements Releasable { throw new RuntimeException("Failed to rename ports file", e); } } - - @SuppressForbidden(reason = "Need to specify address and port") - private static InetSocketAddress buildAddress(String address, int port) { - return new InetSocketAddress(address, port); - } } diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java index 45cba5871e0..95df2d04458 100644 --- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java +++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java @@ -107,14 +107,14 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase { return client; } - @SuppressForbidden(reason = "Must use port when creating address") private static Client startClient() throws IOException { String[] stringAddresses = 
clusterAddresses.split(","); TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; int i = 0; for (String stringAddress : stringAddresses) { URL url = new URL("http://" + stringAddress); - transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(url.getHost(), url.getPort())); + InetAddress inetAddress = InetAddress.getByName(url.getHost()); + transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort())); } return startClient(createTempDir(), transportAddresses); } diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 51d9a5c55ab..37a8fd388b8 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1730,14 +1730,14 @@ public abstract class ESIntegTestCase extends ESTestCase { return Settings.EMPTY; } - @SuppressForbidden(reason = "Must use port when creating address") private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException { String[] stringAddresses = clusterAddresses.split(","); TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; int i = 0; for (String stringAddress : stringAddresses) { URL url = new URL("http://" + stringAddress); - transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(url.getHost(), url.getPort())); + InetAddress inetAddress = InetAddress.getByName(url.getHost()); + transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort())); } return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses); } From 2602439a51f109e059123b232ec60c9d816c60dd Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 12 Dec 2015 17:51:46 -0500 
Subject: [PATCH 041/167] Delegate to j.u.c.ThreadLocalRandom for Random instances --- .../org/elasticsearch/common/Randomness.java | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java index e912a9977f1..dbfa8034b99 100644 --- a/core/src/main/java/org/elasticsearch/common/Randomness.java +++ b/core/src/main/java/org/elasticsearch/common/Randomness.java @@ -22,10 +22,10 @@ package org.elasticsearch.common; import org.elasticsearch.common.settings.Settings; import java.lang.reflect.Method; -import java.security.SecureRandom; import java.util.Collections; import java.util.List; import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; /** * Provides factory methods for producing reproducible sources of @@ -111,25 +111,10 @@ public final class Randomness { private static Random getWithoutSeed() { assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; - return RandomnessHelper.LOCAL.get(); + return ThreadLocalRandom.current(); } public static void shuffle(List list) { Collections.shuffle(list, get()); } - - private static class RandomnessHelper { - private static final SecureRandom SR = new SecureRandom(); - - private static final ThreadLocal LOCAL = ThreadLocal.withInitial( - () -> { - byte[] bytes = SR.generateSeed(8); - long accumulator = 0; - for (int i = 0; i < bytes.length; i++) { - accumulator = (accumulator << 8) + bytes[i] & 0xFFL; - } - return new Random(accumulator); - } - ); - } } From f520f8bfb97f8bbb9fa6205045facf1c12123cb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 11 Dec 2015 14:40:50 +0100 Subject: [PATCH 042/167] Tests: Add test for parsing "_name" field in RangeQueryParser --- .../index/query/RangeQueryBuilderTests.java | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) 
diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index bf580e55f46..4df799e9f37 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -23,7 +23,9 @@ import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.lucene.BytesRefs; +import org.hamcrest.core.IsEqual; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -353,4 +355,42 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase Date: Mon, 14 Dec 2015 12:51:58 +0100 Subject: [PATCH 043/167] Fail and close translog hard if writing to disk fails Today we are super lenient (how could I missed that for f**k sake) with failing / closing the translog writer when we hit an exception. It's actually worse, we allow to further write to it and don't care what has been already written to disk and what hasn't. We keep the buffer in memory and try to write it again on the next operation. When we hit a disk-full expcetion due to for instance a big merge we are likely adding document to the translog but fail to write them to disk. Once the merge failed and freed up it's diskspace (note this is a small window when concurrently indexing and failing the shard due to out of space exceptions) we will allow in-flight operations to add to the translog and then once we fail the shard fsync it. These operations are written to disk and fsynced which is fine but the previous buffer flush might have written some bytes to disk which are not corrupting the translog. That wouldn't be an issue if we prevented the fsync. 
Closes #15333 --- .../translog/BufferingTranslogWriter.java | 35 ++++-- .../index/translog/Translog.java | 58 +++++---- .../index/translog/TranslogReader.java | 8 +- .../index/translog/TranslogWriter.java | 54 +++++++-- .../index/translog/BufferedTranslogTests.java | 5 +- .../index/translog/TranslogTests.java | 114 +++++++++++++++++- 6 files changed, 218 insertions(+), 56 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java index 6026468973a..2a68d34282d 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java @@ -48,13 +48,17 @@ public final class BufferingTranslogWriter extends TranslogWriter { public Translog.Location add(BytesReference data) throws IOException { try (ReleasableLock lock = writeLock.acquire()) { ensureOpen(); - operationCounter++; final long offset = totalOffset; if (data.length() >= buffer.length) { flush(); // we use the channel to write, since on windows, writing to the RAF might not be reflected // when reading through the channel - data.writeTo(channel); + try { + data.writeTo(channel); + } catch (Throwable ex) { + closeWithTragicEvent(ex); + throw ex; + } writtenOffset += data.length(); totalOffset += data.length(); return new Translog.Location(generation, offset, data.length()); @@ -64,6 +68,7 @@ public final class BufferingTranslogWriter extends TranslogWriter { } data.writeTo(bufferOs); totalOffset += data.length(); + operationCounter++; return new Translog.Location(generation, offset, data.length()); } } @@ -71,10 +76,17 @@ public final class BufferingTranslogWriter extends TranslogWriter { protected final void flush() throws IOException { assert writeLock.isHeldByCurrentThread(); if (bufferCount > 0) { + ensureOpen(); // we use the channel to write, since on windows, writing to 
the RAF might not be reflected // when reading through the channel - Channels.writeToChannel(buffer, 0, bufferCount, channel); - writtenOffset += bufferCount; + final int bufferSize = bufferCount; + try { + Channels.writeToChannel(buffer, 0, bufferSize, channel); + } catch (Throwable ex) { + closeWithTragicEvent(ex); + throw ex; + } + writtenOffset += bufferSize; bufferCount = 0; } } @@ -97,7 +109,7 @@ public final class BufferingTranslogWriter extends TranslogWriter { } @Override - public boolean syncNeeded() { + public synchronized boolean syncNeeded() { return totalOffset != lastSyncedOffset; } @@ -107,15 +119,24 @@ public final class BufferingTranslogWriter extends TranslogWriter { return; } synchronized (this) { + ensureOpen(); channelReference.incRef(); try { + final long offsetToSync; try (ReleasableLock lock = writeLock.acquire()) { flush(); - lastSyncedOffset = totalOffset; + offsetToSync = totalOffset; } // we can do this outside of the write lock but we have to protect from // concurrent syncs - checkpoint(lastSyncedOffset, operationCounter, channelReference); + try { + ensureOpen(); + checkpoint(offsetToSync, operationCounter, channelReference); + } catch (IOException ex) { + closeWithTragicEvent(ex); + throw ex; + } + lastSyncedOffset = offsetToSync; } finally { channelReference.decRef(); } diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 35dd895bc2e..394d7c75ef8 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -115,7 +115,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final Path location; private TranslogWriter current; private volatile ImmutableTranslogReader currentCommittingTranslog; - private long lastCommittedTranslogFileGeneration = -1; // -1 is safe as it will not cause an translog deletion. 
+ private volatile long lastCommittedTranslogFileGeneration = -1; // -1 is safe as it will not cause an translog deletion. private final AtomicBoolean closed = new AtomicBoolean(); private final TranslogConfig config; private final String translogUUID; @@ -288,10 +288,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC if (closed.compareAndSet(false, true)) { try (ReleasableLock lock = writeLock.acquire()) { try { - IOUtils.close(current, currentCommittingTranslog); + current.sync(); } finally { - IOUtils.close(recoveredTranslogs); - recoveredTranslogs.clear(); + try { + IOUtils.close(current, currentCommittingTranslog); + } finally { + IOUtils.close(recoveredTranslogs); + recoveredTranslogs.clear(); + } } } finally { FutureUtils.cancel(syncScheduler); @@ -354,7 +358,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC TranslogWriter createWriter(long fileGeneration) throws IOException { TranslogWriter newFile; try { - newFile = TranslogWriter.create(config.getType(), shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), new OnCloseRunnable(), config.getBufferSize()); + newFile = TranslogWriter.create(config.getType(), shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), new OnCloseRunnable(), config.getBufferSize(), getChannelFactory()); } catch (IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); } @@ -548,31 +552,29 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final class OnCloseRunnable implements Callback { @Override public void handle(ChannelReference channelReference) { - try (ReleasableLock lock = writeLock.acquire()) { - if (isReferencedGeneration(channelReference.getGeneration()) == false) { - Path translogPath = channelReference.getPath(); - assert channelReference.getPath().getParent().equals(location) : "translog files must be in 
the location folder: " + location + " but was: " + translogPath; - // if the given translogPath is not the current we can safely delete the file since all references are released - logger.trace("delete translog file - not referenced and not current anymore {}", translogPath); - IOUtils.deleteFilesIgnoringExceptions(translogPath); - IOUtils.deleteFilesIgnoringExceptions(translogPath.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration()))); + if (isReferencedGeneration(channelReference.getGeneration()) == false) { + Path translogPath = channelReference.getPath(); + assert channelReference.getPath().getParent().equals(location) : "translog files must be in the location folder: " + location + " but was: " + translogPath; + // if the given translogPath is not the current we can safely delete the file since all references are released + logger.trace("delete translog file - not referenced and not current anymore {}", translogPath); + IOUtils.deleteFilesIgnoringExceptions(translogPath); + IOUtils.deleteFilesIgnoringExceptions(translogPath.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration()))); - } - try (DirectoryStream stream = Files.newDirectoryStream(location)) { - for (Path path : stream) { - Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString()); - if (matcher.matches()) { - long generation = Long.parseLong(matcher.group(1)); - if (isReferencedGeneration(generation) == false) { - logger.trace("delete translog file - not referenced and not current anymore {}", path); - IOUtils.deleteFilesIgnoringExceptions(path); - IOUtils.deleteFilesIgnoringExceptions(path.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration()))); - } + } + try (DirectoryStream stream = Files.newDirectoryStream(location)) { + for (Path path : stream) { + Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString()); + if (matcher.matches()) { + long generation = 
Long.parseLong(matcher.group(1)); + if (isReferencedGeneration(generation) == false) { + logger.trace("delete translog file - not referenced and not current anymore {}", path); + IOUtils.deleteFilesIgnoringExceptions(path); + IOUtils.deleteFilesIgnoringExceptions(path.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration()))); } } - } catch (IOException e) { - logger.warn("failed to delete unreferenced translog files", e); } + } catch (IOException e) { + logger.warn("failed to delete unreferenced translog files", e); } } } @@ -1400,4 +1402,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return outstandingViews.size(); } + TranslogWriter.ChannelFactory getChannelFactory() { + return TranslogWriter.ChannelFactory.DEFAULT; + } + } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java index 590bc319057..d7077fd90ad 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java @@ -140,16 +140,16 @@ public abstract class TranslogReader implements Closeable, Comparable onClose, int bufferSize) throws IOException { + public static TranslogWriter create(Type type, ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback onClose, int bufferSize, ChannelFactory channelFactory) throws IOException { final BytesRef ref = new BytesRef(translogUUID); final int headerLength = CodecUtil.headerLength(TRANSLOG_CODEC) + ref.length + RamUsageEstimator.NUM_BYTES_INT; - final FileChannel channel = FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); + final FileChannel channel = channelFactory.open(file); try { // This OutputStreamDataOutput is intentionally not closed because // closing it will close the FileChannel @@ -118,6 +121,18 @@ public class 
TranslogWriter extends TranslogReader { } } + protected final void closeWithTragicEvent(Throwable throwable) throws IOException { + try (ReleasableLock lock = writeLock.acquire()) { + if (throwable != null) { + if (tragicEvent == null) { + tragicEvent = throwable; + } else { + tragicEvent.addSuppressed(throwable); + } + } + close(); + } + } /** * add the given bytes to the translog and return the location they were written at @@ -127,9 +142,14 @@ public class TranslogWriter extends TranslogReader { try (ReleasableLock lock = writeLock.acquire()) { ensureOpen(); position = writtenOffset; - data.writeTo(channel); + try { + data.writeTo(channel); + } catch (Throwable e) { + closeWithTragicEvent(e); + throw e; + } writtenOffset = writtenOffset + data.length(); - operationCounter = operationCounter + 1; + operationCounter++;; } return new Translog.Location(generation, position, data.length()); } @@ -147,6 +167,7 @@ public class TranslogWriter extends TranslogReader { // check if we really need to sync here... 
if (syncNeeded()) { try (ReleasableLock lock = writeLock.acquire()) { + ensureOpen(); lastSyncedOffset = writtenOffset; checkpoint(lastSyncedOffset, operationCounter, channelReference); } @@ -262,15 +283,6 @@ public class TranslogWriter extends TranslogReader { return false; } - @Override - protected final void doClose() throws IOException { - try (ReleasableLock lock = writeLock.acquire()) { - sync(); - } finally { - super.doClose(); - } - } - @Override protected void readBytes(ByteBuffer buffer, long position) throws IOException { try (ReleasableLock lock = readLock.acquire()) { @@ -288,4 +300,20 @@ public class TranslogWriter extends TranslogReader { Checkpoint checkpoint = new Checkpoint(syncPosition, numOperations, generation); Checkpoint.write(checkpointFile, checkpoint, options); } + + static class ChannelFactory { + + static final ChannelFactory DEFAULT = new ChannelFactory(); + + // only for testing until we have a disk-full FileSystemt + public FileChannel open(Path file) throws IOException { + return FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); + } + } + + protected final void ensureOpen() { + if (isClosed()) { + throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragicEvent); + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java index aab980e975d..a29cc6cf8d0 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java @@ -34,13 +34,12 @@ import java.nio.file.Path; public class BufferedTranslogTests extends TranslogTests { @Override - protected Translog create(Path path) throws IOException { + protected TranslogConfig getTranslogConfig(Path path) { Settings build = Settings.settingsBuilder() 
.put("index.translog.fs.type", TranslogWriter.Type.BUFFERED.name()) .put("index.translog.fs.buffer_size", 10 + randomInt(128 * 1024), ByteSizeUnit.BYTES) .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) .build(); - TranslogConfig translogConfig = new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null); - return new Translog(translogConfig); + return new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null); } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 26faa02a17d..d90c3257add 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.translog; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.Term; +import org.apache.lucene.mockfile.FilterFileChannel; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.IOUtils; @@ -110,13 +111,16 @@ public class TranslogTests extends ESTestCase { } } - protected Translog create(Path path) throws IOException { + private Translog create(Path path) throws IOException { + return new Translog(getTranslogConfig(path)); + } + + protected TranslogConfig getTranslogConfig(Path path) { Settings build = Settings.settingsBuilder() .put(TranslogConfig.INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.SIMPLE.name()) .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) .build(); - TranslogConfig translogConfig = new TranslogConfig(shardId, path, 
IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null); - return new Translog(translogConfig); + return new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null); } protected void addToTranslogAndList(Translog translog, ArrayList list, Translog.Operation op) { @@ -1279,4 +1283,108 @@ public class TranslogTests extends ESTestCase { } } } + + public void testFailFlush() throws IOException { + Path tempDir = createTempDir(); + final AtomicBoolean failWrite = new AtomicBoolean(); + final AtomicBoolean simulateDiskFull = new AtomicBoolean(); + TranslogConfig config = getTranslogConfig(tempDir); + Translog translog = new Translog(config) { + @Override + TranslogWriter.ChannelFactory getChannelFactory() { + final TranslogWriter.ChannelFactory factory = super.getChannelFactory(); + + return new TranslogWriter.ChannelFactory() { + @Override + public FileChannel open(Path file) throws IOException { + FileChannel channel = factory.open(file); + return new FilterFileChannel(channel) { + + @Override + public int write(ByteBuffer src) throws IOException { + if (failWrite.get()) { + throw new IOException("boom"); + } + if (simulateDiskFull.get()) { + if (src.limit() > 1) { + final int pos = src.position(); + final int limit = src.limit(); + src.limit(limit / 2); + super.write(src); + src.position(pos); + src.limit(limit); + throw new IOException("no space left on device"); + } + } + return super.write(src); + } + }; + } + }; + } + }; + + List locations = new ArrayList<>(); + int opsSynced = 0; + int opsAdded = 0; + boolean failed = false; + boolean syncFailed = true; + while(failed == false) { + try { + locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); + opsAdded++; + translog.sync(); + opsSynced++; + } catch 
(IOException ex) { + failed = true; + assertEquals("no space left on device", ex.getMessage()); + } catch (Exception ex) { + failed = true; + assertTrue(ex.toString(), ex.getMessage().startsWith("Failed to write operation")); + } + simulateDiskFull.set(randomBoolean()); + } + simulateDiskFull.set(false); + if (randomBoolean()) { + try { + locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); + opsSynced++; + } catch (AlreadyClosedException ex) { + assertNotNull(ex.getCause()); + assertEquals(ex.getCause().getMessage(), "no space left on device"); + } + + } + Translog.TranslogGeneration translogGeneration = translog.getGeneration(); + try { + translog.newSnapshot(); + fail("already closed"); + } catch (AlreadyClosedException ex) { + // all is well + } + + try { + translog.close(); + if (opsAdded != opsSynced) { + fail("already closed"); + } + } catch (AlreadyClosedException ex) { + assertNotNull(ex.getCause()); + } + config.setTranslogGeneration(translogGeneration); + try (Translog tlog = new Translog(config)){ + assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration()); + assertFalse(tlog.syncNeeded()); + + try (Translog.Snapshot snapshot = tlog.newSnapshot()) { + assertEquals(opsSynced, snapshot.estimatedTotalOperations()); + for (int i = 0; i < opsSynced; i++) { + assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, locations.get(i).generation); + Translog.Operation next = snapshot.next(); + assertNotNull("operation " + i + " must be non-null", next); + assertEquals(i, Integer.parseInt(next.getSource().source.toUtf8())); + } + } + } + } } From f20f41e02e4a42fc65fb97d8238636ff159dba8c Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Mon, 14 Dec 2015 14:27:40 +0100 Subject: [PATCH 044/167] Merge pull request #15405 from alexg-dev/patch-1 
More detailed explanation of some similarity types --- docs/reference/index-modules/similarity.asciidoc | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index 0ade819dc0c..ddec26b8030 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -112,7 +112,10 @@ Type name: `DFR` ==== IB similarity. http://lucene.apache.org/core/5_2_1/core/org/apache/lucene/search/similarities/IBSimilarity.html[Information -based model] . This similarity has the following options: +based model] . The algorithm is based on the concept that the information content in any symbolic 'distribution' +sequence is primarily determined by the repetitive usage of its basic elements. +For written texts this challenge would correspond to comparing the writing styles of different authors. +This similarity has the following options: [horizontal] `distribution`:: Possible values: `ll` and `spl`. @@ -138,11 +141,11 @@ Type name: `LMDirichlet` ==== LM Jelinek Mercer similarity. http://lucene.apache.org/core/5_2_1/core/org/apache/lucene/search/similarities/LMJelinekMercerSimilarity.html[LM -Jelinek Mercer similarity] . This similarity has the following options: +Jelinek Mercer similarity] . The algorithm attempts to capture important patterns in the text, while leaving out noise. This similarity has the following options: [horizontal] `lambda`:: The optimal value depends on both the collection and the query. The optimal value is around `0.1` -for title queries and `0.7` for long queries. Default to `0.1`. +for title queries and `0.7` for long queries. Default to `0.1`. When value approaches `0`, documents that match more query terms will be ranked higher than those that match fewer terms.
Type name: `LMJelinekMercer` From c6003b6f1378448a0ac3f8721180e61329f4e1ec Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 14 Dec 2015 14:36:20 +0100 Subject: [PATCH 045/167] apply feedback from @bleskes --- .../translog/BufferingTranslogWriter.java | 29 +++++++++---------- .../index/translog/TranslogWriter.java | 11 ++++--- .../index/translog/TranslogTests.java | 24 +++++++++++++-- 3 files changed, 42 insertions(+), 22 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java index 2a68d34282d..97c00ce0a02 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java @@ -61,13 +61,13 @@ public final class BufferingTranslogWriter extends TranslogWriter { } writtenOffset += data.length(); totalOffset += data.length(); - return new Translog.Location(generation, offset, data.length()); + } else { + if (data.length() > buffer.length - bufferCount) { + flush(); + } + data.writeTo(bufferOs); + totalOffset += data.length(); } - if (data.length() > buffer.length - bufferCount) { - flush(); - } - data.writeTo(bufferOs); - totalOffset += data.length(); operationCounter++; return new Translog.Location(generation, offset, data.length()); } @@ -109,29 +109,28 @@ public final class BufferingTranslogWriter extends TranslogWriter { } @Override - public synchronized boolean syncNeeded() { + public boolean syncNeeded() { return totalOffset != lastSyncedOffset; } @Override - public void sync() throws IOException { - if (!syncNeeded()) { - return; - } - synchronized (this) { - ensureOpen(); + public synchronized void sync() throws IOException { + if (syncNeeded()) { + ensureOpen(); // this call gives a better exception that the incRef if we are closed by a tragic event channelReference.incRef(); try { final long offsetToSync; + 
final int opsCounter; try (ReleasableLock lock = writeLock.acquire()) { flush(); offsetToSync = totalOffset; + opsCounter = operationCounter; } // we can do this outside of the write lock but we have to protect from // concurrent syncs try { - ensureOpen(); - checkpoint(offsetToSync, operationCounter, channelReference); + ensureOpen(); // just for kicks - the checkpoint happens or not either way + checkpoint(offsetToSync, opsCounter, channelReference); } catch (IOException ex) { closeWithTragicEvent(ex); throw ex; diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 6254582c2f8..01da4ae7338 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -55,7 +55,8 @@ public class TranslogWriter extends TranslogReader { protected volatile int operationCounter; /* the offset in bytes written to the file */ protected volatile long writtenOffset; - protected volatile Throwable tragicEvent; + /* if we hit an exception that we can't recover from we assign it to this var and ship it with every AlreadyClosedException we throw */ + private volatile Throwable tragicEvent; public TranslogWriter(ShardId shardId, long generation, ChannelReference channelReference) throws IOException { @@ -163,13 +164,15 @@ public class TranslogWriter extends TranslogReader { /** * write all buffered ops to disk and fsync file */ - public void sync() throws IOException { + public synchronized void sync() throws IOException { // synchronized to ensure only one sync happens a time // check if we really need to sync here... 
if (syncNeeded()) { try (ReleasableLock lock = writeLock.acquire()) { ensureOpen(); - lastSyncedOffset = writtenOffset; - checkpoint(lastSyncedOffset, operationCounter, channelReference); + final long offset = writtenOffset; + final int opsCount = operationCounter; + checkpoint(offset, opsCount, channelReference); + lastSyncedOffset = offset; } } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index d90c3257add..00ee7e0b84c 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -26,6 +26,7 @@ import org.apache.lucene.mockfile.FilterFileChannel; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -1328,7 +1329,6 @@ public class TranslogTests extends ESTestCase { int opsSynced = 0; int opsAdded = 0; boolean failed = false; - boolean syncFailed = true; while(failed == false) { try { locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); @@ -1338,7 +1338,8 @@ public class TranslogTests extends ESTestCase { } catch (IOException ex) { failed = true; assertEquals("no space left on device", ex.getMessage()); - } catch (Exception ex) { + } catch (TranslogException ex) { + // we catch IOExceptions in Translog#add -- that's how we got here failed = true; assertTrue(ex.toString(), ex.getMessage().startsWith("Failed to write operation")); } @@ -1348,7 +1349,7 @@ public class TranslogTests extends ESTestCase { if (randomBoolean()) { try { locations.add(translog.add(new Translog.Index("test", "" 
+ opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); - opsSynced++; + fail("we are already closed"); } catch (AlreadyClosedException ex) { assertNotNull(ex.getCause()); assertEquals(ex.getCause().getMessage(), "no space left on device"); @@ -1387,4 +1388,21 @@ } } } + + public void testTranslogOpsCountIsCorrect() throws IOException { + List locations = new ArrayList<>(); + int numOps = randomIntBetween(100, 200); + LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly + for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { + locations.add(translog.add(new Translog.Index("test", "" + opsAdded, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))))); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertEquals(opsAdded+1, snapshot.estimatedTotalOperations()); + for (int i = 0; i < opsAdded; i++) { + assertEquals("expected operation" + i + " to be in the current translog but wasn't", translog.currentFileGeneration(), locations.get(i).generation); + Translog.Operation next = snapshot.next(); + assertNotNull("operation " + i + " must be non-null", next); + } + } + } + } } From 6cefdc82f6fa720a059239bcc376b08481464710 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 14 Dec 2015 15:01:55 +0100 Subject: [PATCH 046/167] Expose tragic event to translog, close translog once we hit a tragic event and fail engine if we hit one too --- .../index/engine/InternalEngine.java | 6 +++- .../index/translog/Translog.java | 34 ++++++++++++++++--- .../index/translog/TranslogWriter.java | 16 ++++++--- .../index/translog/TranslogTests.java | 29 +++++++--------- 4 files changed, 59 insertions(+), 26 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 1404b61b8ec..b5a62eca901 100644 ---
a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -782,9 +782,13 @@ public class InternalEngine extends Engine { // but we are double-checking it's failed and closed if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) { failEngine("already closed by tragic event", indexWriter.getTragicException()); + } else if (translog.isOpen() == false && translog.getTragicException() != null) { + failEngine("already closed by tragic event", translog.getTragicException()); } return true; - } else if (t != null && indexWriter.isOpen() == false && indexWriter.getTragicException() == t) { + } else if (t != null && + ((indexWriter.isOpen() == false && indexWriter.getTragicException() == t) + || (translog.isOpen() == false && translog.getTragicException() == t))) { // this spot on - we are handling the tragic event exception here so we have to fail the engine // right away failEngine(source, t); diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 394d7c75ef8..3d6b76922a4 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -279,7 +279,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } - boolean isOpen() { + /** Returns {@code true} if this {@code Translog} is still open. 
*/ + public boolean isOpen() { return closed.get() == false; } @@ -397,7 +398,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * @see Index * @see org.elasticsearch.index.translog.Translog.Delete */ - public Location add(Operation operation) throws TranslogException { + public Location add(Operation operation) throws IOException { final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays); try { final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(out); @@ -419,7 +420,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC assert current.assertBytesAtLocation(location, bytes); return location; } - } catch (AlreadyClosedException ex) { + } catch (AlreadyClosedException | IOException ex) { + if (current.getTragicException() != null) { + try { + close(); + } catch (Exception inner) { + ex.addSuppressed(inner); + } + } throw ex; } catch (Throwable e) { throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e); @@ -433,6 +441,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * Snapshots are fixed in time and will not be updated with future operations. 
*/ public Snapshot newSnapshot() { + ensureOpen(); try (ReleasableLock lock = readLock.acquire()) { ArrayList toOpen = new ArrayList<>(); toOpen.addAll(recoveredTranslogs); @@ -497,6 +506,15 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC if (closed.get() == false) { current.sync(); } + } catch (AlreadyClosedException | IOException ex) { + if (current.getTragicException() != null) { + try { + close(); + } catch (Exception inner) { + ex.addSuppressed(inner); + } + } + throw ex; } } @@ -1296,6 +1314,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC throw new IllegalStateException("already committing a translog with generation: " + currentCommittingTranslog.getGeneration()); } final TranslogWriter oldCurrent = current; + oldCurrent.ensureOpen(); oldCurrent.sync(); currentCommittingTranslog = current.immutableReader(); Path checkpoint = location.resolve(CHECKPOINT_FILE_NAME); @@ -1391,7 +1410,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private void ensureOpen() { if (closed.get()) { - throw new AlreadyClosedException("translog is already closed"); + throw new AlreadyClosedException("translog is already closed", current.getTragicException()); } } @@ -1406,4 +1425,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return TranslogWriter.ChannelFactory.DEFAULT; } + /** If this {@code Translog} was closed as a side-effect of a tragic exception, + * e.g. disk full while flushing a new segment, this returns the root cause exception. + * Otherwise (no tragic exception has occurred) it returns null. 
*/ + public Throwable getTragicException() { + return current.getTragicException(); + } + } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 01da4ae7338..d6f21ebb412 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -56,7 +56,7 @@ public class TranslogWriter extends TranslogReader { /* the offset in bytes written to the file */ protected volatile long writtenOffset; /* if we hit an exception that we can't recover from we assign it to this var and ship it with every AlreadyClosedException we throw */ - private volatile Throwable tragicEvent; + private volatile Throwable tragedy; public TranslogWriter(ShardId shardId, long generation, ChannelReference channelReference) throws IOException { @@ -94,6 +94,12 @@ public class TranslogWriter extends TranslogReader { throw throwable; } } + /** If this {@code TranslogWriter} was closed as a side-effect of a tragic exception, + * e.g. disk full while flushing a new segment, this returns the root cause exception. + * Otherwise (no tragic exception has occurred) it returns null. 
*/ + public Throwable getTragicException() { + return tragedy; + } public enum Type { @@ -125,10 +131,10 @@ public class TranslogWriter extends TranslogReader { protected final void closeWithTragicEvent(Throwable throwable) throws IOException { try (ReleasableLock lock = writeLock.acquire()) { if (throwable != null) { - if (tragicEvent == null) { - tragicEvent = throwable; + if (tragedy == null) { + tragedy = throwable; } else { - tragicEvent.addSuppressed(throwable); + tragedy.addSuppressed(throwable); } } close(); @@ -316,7 +322,7 @@ public class TranslogWriter extends TranslogReader { protected final void ensureOpen() { if (isClosed()) { - throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragicEvent); + throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragedy); } } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 00ee7e0b84c..ad4ac07f6d6 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -124,7 +124,7 @@ public class TranslogTests extends ESTestCase { return new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null); } - protected void addToTranslogAndList(Translog translog, ArrayList list, Translog.Operation op) { + protected void addToTranslogAndList(Translog translog, ArrayList list, Translog.Operation op) throws IOException { list.add(op); translog.add(op); } @@ -335,7 +335,7 @@ public class TranslogTests extends ESTestCase { } } - public void testSnapshot() { + public void testSnapshot() throws IOException { ArrayList ops = new ArrayList<>(); Translog.Snapshot snapshot = translog.newSnapshot(); assertThat(snapshot, SnapshotMatchers.size(0)); @@ -394,7 
+394,7 @@ public class TranslogTests extends ESTestCase { Translog.Snapshot snapshot = translog.newSnapshot(); fail("translog is closed"); } catch (AlreadyClosedException ex) { - assertThat(ex.getMessage(), containsString("translog-1.tlog is already closed can't increment")); + assertEquals(ex.getMessage(), "translog is already closed"); } } @@ -639,7 +639,7 @@ public class TranslogTests extends ESTestCase { final String threadId = "writer_" + i; writers[i] = new Thread(new AbstractRunnable() { @Override - public void doRun() throws BrokenBarrierException, InterruptedException { + public void doRun() throws BrokenBarrierException, InterruptedException, IOException { barrier.await(); int counter = 0; while (run.get()) { @@ -1287,7 +1287,6 @@ public class TranslogTests extends ESTestCase { public void testFailFlush() throws IOException { Path tempDir = createTempDir(); - final AtomicBoolean failWrite = new AtomicBoolean(); final AtomicBoolean simulateDiskFull = new AtomicBoolean(); TranslogConfig config = getTranslogConfig(tempDir); Translog translog = new Translog(config) { @@ -1303,9 +1302,6 @@ public class TranslogTests extends ESTestCase { @Override public int write(ByteBuffer src) throws IOException { - if (failWrite.get()) { - throw new IOException("boom"); - } if (simulateDiskFull.get()) { if (src.limit() > 1) { final int pos = src.position(); @@ -1337,11 +1333,8 @@ public class TranslogTests extends ESTestCase { opsSynced++; } catch (IOException ex) { failed = true; + assertFalse(translog.isOpen()); assertEquals("no space left on device", ex.getMessage()); - } catch (TranslogException ex) { - // we catch IOExceptions in Translog#add -- that's how we got here - failed = true; - assertTrue(ex.toString(), ex.getMessage().startsWith("Failed to write operation")); } simulateDiskFull.set(randomBoolean()); } @@ -1362,16 +1355,20 @@ public class TranslogTests extends ESTestCase { fail("already closed"); } catch (AlreadyClosedException ex) { // all is well + 
assertNotNull(ex.getCause()); + assertSame(translog.getTragicException(), ex.getCause()); } try { - translog.close(); - if (opsAdded != opsSynced) { - fail("already closed"); - } + translog.commit(); + fail("already closed"); } catch (AlreadyClosedException ex) { assertNotNull(ex.getCause()); + assertSame(translog.getTragicException(), ex.getCause()); } + + assertFalse(translog.isOpen()); + translog.close(); // we are closed config.setTranslogGeneration(translogGeneration); try (Translog tlog = new Translog(config)){ assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration()); From 2ab937d64dc725cf142c5ae50552021d330460d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 2 Dec 2015 17:28:09 +0100 Subject: [PATCH 047/167] Joint parsing of common global Highlighter and subfield parameters The top-level highlighter has many options that can be overwritten per field. Currently there is very similar code for this in two places. This PR pulls out the parsing of the common parameters into AbstractHighlighterBuilder for better reuse and to keep parsing of common parameters more consistent.
--- .../highlight/AbstractHighlighterBuilder.java | 100 +++++++- .../search/highlight/HighlightBuilder.java | 233 +++++------------- .../highlight/HighlightBuilderTests.java | 56 ++++- 3 files changed, 204 insertions(+), 185 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java index d30144f777f..12fb9874be2 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java @@ -22,13 +22,17 @@ package org.elasticsearch.search.highlight; import org.apache.lucene.search.highlight.SimpleFragmenter; import org.apache.lucene.search.highlight.SimpleSpanFragmenter; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; - +import org.elasticsearch.index.query.QueryParseContext; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -419,6 +423,100 @@ public abstract class AbstractHighlighterBuilder preTagsList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + preTagsList.add(parser.text()); + } + highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()])); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) { + List postTagsList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + postTagsList.add(parser.text()); + } + 
highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()])); + } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) { + throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName); + } + } else if (token.isValue()) { + if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) { + highlightBuilder.order(parser.text()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) { + highlightBuilder.highlightFilter(parser.booleanValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) { + highlightBuilder.fragmentSize(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) { + highlightBuilder.numOfFragments(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) { + highlightBuilder.requireFieldMatch(parser.booleanValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) { + highlightBuilder.boundaryMaxScan(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) { + highlightBuilder.boundaryChars(parser.text().toCharArray()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) { + highlightBuilder.highlighterType(parser.text()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) { + highlightBuilder.fragmenter(parser.text()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) { + highlightBuilder.noMatchSize(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) { + highlightBuilder.forceSource(parser.booleanValue()); + } else if 
(parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) { + highlightBuilder.phraseLimit(parser.intValue()); + } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) { + throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName); + } + } else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) { + if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) { + highlightBuilder.options(parser.map()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) { + highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder()); + } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) { + throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName); + } + } else if (currentFieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName); + } + } + + if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) { + throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global postTags are not set"); + } + return highlightBuilder; + } + + /** + * @param parser the input parser. Implementing classes might advance the parser depending on the + * information they need to instantiate a new instance + * @return a new instance + */ + protected abstract HB createInstance(XContentParser parser) throws IOException; + + /** + * Implementing subclasses can handle parsing special options depending on the + * current token, field name and the parse context. 
+ * @return true if an option was found and successfully parsed, otherwise false + */ + protected abstract boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, XContentParser.Token endMarkerToken) throws IOException; + @Override public final int hashCode() { return Objects.hash(getClass(), Arrays.hashCode(preTags), Arrays.hashCode(postTags), fragmentSize, diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java index e45303ccb58..4b627340324 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java @@ -30,11 +30,13 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions; import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions.Builder; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -230,117 +232,45 @@ public class HighlightBuilder extends AbstractHighlighterBuilder preTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - preTagsList.add(parser.text()); - } - highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, POST_TAGS_FIELD)) { - List postTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - 
postTagsList.add(parser.text()); - } - highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) { - highlightBuilder.useExplicitFieldOrder(true); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.START_OBJECT) { - String highlightFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - if (highlightFieldName != null) { - throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field"); - } - highlightFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext)); - } - } - } else { - throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field"); - } - } - } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", topLevelFieldName); - } - } else if (token.isValue()) { - if (parseContext.parseFieldMatcher().match(topLevelFieldName, ORDER_FIELD)) { - highlightBuilder.order(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TAGS_SCHEMA_FIELD)) { - highlightBuilder.tagsSchema(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_FILTER_FIELD)) { - highlightBuilder.highlightFilter(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENT_SIZE_FIELD)) { - highlightBuilder.fragmentSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, NUMBER_OF_FRAGMENTS_FIELD)) { - highlightBuilder.numOfFragments(parser.intValue()); - } else if 
(parseContext.parseFieldMatcher().match(topLevelFieldName, ENCODER_FIELD)) { - highlightBuilder.encoder(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, REQUIRE_FIELD_MATCH_FIELD)) { - highlightBuilder.requireFieldMatch(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_MAX_SCAN_FIELD)) { - highlightBuilder.boundaryMaxScan(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_CHARS_FIELD)) { - highlightBuilder.boundaryChars(parser.text().toCharArray()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TYPE_FIELD)) { - highlightBuilder.highlighterType(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENTER_FIELD)) { - highlightBuilder.fragmenter(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, NO_MATCH_SIZE_FIELD)) { - highlightBuilder.noMatchSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FORCE_SOURCE_FIELD)) { - highlightBuilder.forceSource(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, PHRASE_LIMIT_FIELD)) { - highlightBuilder.phraseLimit(parser.intValue()); - } else { - throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", topLevelFieldName); - } - } else if (token == XContentParser.Token.START_OBJECT && topLevelFieldName != null) { - if (parseContext.parseFieldMatcher().match(topLevelFieldName, OPTIONS_FIELD)) { - highlightBuilder.options(parser.map()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) { - String highlightFieldName = null; + boolean foundCurrentFieldMatch = false; + if (currentToken.isValue()) { + if (parseContext.parseFieldMatcher().match(currentFieldName, TAGS_SCHEMA_FIELD)) { + tagsSchema(parser.text()); + foundCurrentFieldMatch = true; + } else if 
(parseContext.parseFieldMatcher().match(currentFieldName, ENCODER_FIELD)) { + encoder(parser.text()); + foundCurrentFieldMatch = true; + } + } else if (currentToken == Token.START_ARRAY && parseContext.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) { + useExplicitFieldOrder(true); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.START_OBJECT) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { - highlightFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext)); + field(HighlightBuilder.Field.PROTOTYPE.fromXContent(parseContext)); } } - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_QUERY_FIELD)) { - highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder()); + foundCurrentFieldMatch = true; } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", topLevelFieldName); + throw new ParsingException(parser.getTokenLocation(), + "If highlighter fields is an array it must contain objects containing a single field"); } - } else if (topLevelFieldName != null) { - throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, topLevelFieldName); } + } else if (currentToken == Token.START_OBJECT && parseContext.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + field(HighlightBuilder.Field.PROTOTYPE.fromXContent(parseContext)); + } + } + foundCurrentFieldMatch = true; } - - if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) { - throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global 
postTags are not set"); - } - return highlightBuilder; + return foundCurrentFieldMatch; } public SearchContextHighlight build(QueryShardContext context) throws IOException { @@ -467,6 +397,11 @@ public class HighlightBuilder extends AbstractHighlighterBuilder preTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - preTagsList.add(parser.text()); - } - field.preTags(preTagsList.toArray(new String[preTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) { - List postTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - postTagsList.add(parser.text()); - } - field.postTags(postTagsList.toArray(new String[postTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, MATCHED_FIELDS_FIELD)) { - List matchedFields = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - matchedFields.add(parser.text()); - } - field.matchedFields(matchedFields.toArray(new String[matchedFields.size()])); - } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName); - } - } else if (token.isValue()) { - if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) { - field.fragmentSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) { - field.numOfFragments(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_OFFSET_FIELD)) { - field.fragmentOffset(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) { - field.highlightFilter(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) { - field.order(parser.text()); - } else if 
(parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) { - field.requireFieldMatch(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) { - field.boundaryMaxScan(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) { - field.boundaryChars(parser.text().toCharArray()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) { - field.highlighterType(parser.text()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) { - field.fragmenter(parser.text()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) { - field.noMatchSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) { - field.forceSource(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) { - field.phraseLimit(parser.intValue()); - } else { - throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName); - } - } else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) { - if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) { - field.highlightQuery(parseContext.parseInnerQueryBuilder()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) { - field.options(parser.map()); - } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName); - } - } else if (currentFieldName != null) { - throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName); + boolean foundCurrentFieldMatch = false; + if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_OFFSET_FIELD) && 
currentToken.isValue()) { + fragmentOffset(parser.intValue()); + foundCurrentFieldMatch = true; + } else if (parseContext.parseFieldMatcher().match(currentFieldName, MATCHED_FIELDS_FIELD) + && currentToken == XContentParser.Token.START_ARRAY) { + List matchedFields = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + matchedFields.add(parser.text()); } + matchedFields(matchedFields.toArray(new String[matchedFields.size()])); + foundCurrentFieldMatch = true; + } + return foundCurrentFieldMatch; + } + + @Override + protected Field createInstance(XContentParser parser) throws IOException { + if (parser.currentToken() == XContentParser.Token.FIELD_NAME) { + String fieldname = parser.currentName(); + return new Field(fieldname); + } else { + throw new ParsingException(parser.getTokenLocation(), "unknown token type [{}], expected field name", parser.currentToken()); } - return field; } @Override diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java index 75fc9f98156..f527367daca 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java @@ -19,10 +19,10 @@ package org.elasticsearch.search.highlight; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -52,7 +52,6 @@ import org.elasticsearch.index.query.QueryShardContext; import 
org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.TermQueryParser; import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.highlight.HighlightBuilder.Field; import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions; import org.elasticsearch.test.ESTestCase; @@ -149,6 +148,7 @@ public class HighlightBuilderTests extends ESTestCase { context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { HighlightBuilder highlightBuilder = randomHighlighterBuilder(); + System.out.println(highlightBuilder); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); @@ -159,7 +159,8 @@ public class HighlightBuilderTests extends ESTestCase { XContentParser parser = XContentHelper.createParser(builder.bytes()); context.reset(parser); - HighlightBuilder secondHighlightBuilder = HighlightBuilder.fromXContent(context); + parser.nextToken(); + HighlightBuilder secondHighlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); assertNotSame(highlightBuilder, secondHighlightBuilder); assertEquals(highlightBuilder, secondHighlightBuilder); assertEquals(highlightBuilder.hashCode(), secondHighlightBuilder.hashCode()); @@ -179,7 +180,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing exception"); } catch (ParsingException e) { assertEquals("cannot parse array with name [bad_fieldname]", e.getMessage()); @@ -196,7 +197,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing 
exception"); } catch (ParsingException e) { assertEquals("cannot parse array with name [bad_fieldname]", e.getMessage()); @@ -216,7 +217,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing exception"); } catch (ParsingException e) { assertEquals("unexpected fieldname [bad_fieldname]", e.getMessage()); @@ -233,7 +234,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing exception"); } catch (ParsingException e) { assertEquals("unexpected fieldname [bad_fieldname]", e.getMessage()); @@ -253,7 +254,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing exception"); } catch (ParsingException e) { assertEquals("cannot parse object with name [bad_fieldname]", e.getMessage()); @@ -270,7 +271,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing exception"); } catch (ParsingException e) { assertEquals("cannot parse object with name [bad_fieldname]", e.getMessage()); @@ -354,7 +355,7 @@ public class HighlightBuilderTests extends ESTestCase { XContentParser parser = XContentFactory.xContent(highlightElement).createParser(highlightElement); context.reset(parser); - HighlightBuilder highlightBuilder = HighlightBuilder.fromXContent(context); + HighlightBuilder highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); assertArrayEquals("setting tags_schema 'styled' should alter pre_tags", HighlightBuilder.DEFAULT_STYLED_PRE_TAG, highlightBuilder.preTags()); 
assertArrayEquals("setting tags_schema 'styled' should alter post_tags", HighlightBuilder.DEFAULT_STYLED_POST_TAGS, @@ -366,7 +367,7 @@ public class HighlightBuilderTests extends ESTestCase { parser = XContentFactory.xContent(highlightElement).createParser(highlightElement); context.reset(parser); - highlightBuilder = HighlightBuilder.fromXContent(context); + highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); assertArrayEquals("setting tags_schema 'default' should alter pre_tags", HighlightBuilder.DEFAULT_PRE_TAGS, highlightBuilder.preTags()); assertArrayEquals("setting tags_schema 'default' should alter post_tags", HighlightBuilder.DEFAULT_POST_TAGS, @@ -379,13 +380,42 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - highlightBuilder = HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("setting unknown tag schema should throw exception"); } catch (IllegalArgumentException e) { assertEquals("Unknown tag schema [somthing_else]", e.getMessage()); } } + /** + * test parsing empty highlight or empty fields blocks + */ + public void testParsingEmptyStructure() throws IOException { + QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + String highlightElement = "{ }"; + XContentParser parser = XContentFactory.xContent(highlightElement).createParser(highlightElement); + + context.reset(parser); + HighlightBuilder highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); + assertEquals("expected plain HighlightBuilder", new HighlightBuilder(), highlightBuilder); + + highlightElement = "{ \"fields\" : { } }"; + parser = XContentFactory.xContent(highlightElement).createParser(highlightElement); + + context.reset(parser); + highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); + assertEquals("defining no field should return plain HighlightBuilder", new 
HighlightBuilder(), highlightBuilder); + + highlightElement = "{ \"fields\" : { \"foo\" : { } } }"; + parser = XContentFactory.xContent(highlightElement).createParser(highlightElement); + + context.reset(parser); + highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); + assertEquals("expected HighlightBuilder with field", new HighlightBuilder().field(new Field("foo")), highlightBuilder); + System.out.println(Math.log(1/(double)(1+1)) + 1.0); + } + protected static XContentBuilder toXContent(HighlightBuilder highlight, XContentType contentType) throws IOException { XContentBuilder builder = XContentFactory.contentBuilder(contentType); if (randomBoolean()) { From 659bab7105d99551ac1b1df224cc9cc9560d685d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 10 Dec 2015 15:37:02 +0100 Subject: [PATCH 048/167] Use HighlightBuilder in SearchSourceBuilder After HighlightBuilder implements Writable now, we can remove the temporary solution for transporting the highlight section in SearchSourceBuilder from the coordinating node to the shard as BytesReference and use HighlightBuilder instead. 
--- .../elasticsearch/search/SearchService.java | 20 +++++-------- .../metrics/tophits/TopHitsBuilder.java | 3 +- .../search/builder/SearchSourceBuilder.java | 30 ++++++------------- .../fetch/innerhits/InnerHitsBuilder.java | 3 +- .../search/highlight/HighlightBuilder.java | 28 ++++++++++++----- .../builder/SearchSourceBuilderTests.java | 11 ++++--- .../highlight/HighlightBuilderTests.java | 4 +-- 7 files changed, 48 insertions(+), 51 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 9501099997f..eb8414bb32e 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -83,12 +83,14 @@ import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext; import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext.FieldDataField; import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase; import org.elasticsearch.search.fetch.script.ScriptFieldsContext.ScriptField; +import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.internal.*; import org.elasticsearch.search.internal.SearchContext.Lifetime; import org.elasticsearch.search.query.*; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; +import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -656,7 +658,7 @@ public class SearchService extends AbstractLifecycleComponent imp } } - private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchParseException { + private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchContextException { // nothing to parse... 
if (source == null) { return; @@ -807,19 +809,11 @@ public class SearchService extends AbstractLifecycleComponent imp fieldDataFieldsContext.setHitExecutionNeeded(true); } if (source.highlighter() != null) { - XContentParser highlighterParser = null; + HighlightBuilder highlightBuilder = source.highlighter(); try { - highlighterParser = XContentFactory.xContent(source.highlighter()).createParser(source.highlighter()); - this.elementParsers.get("highlight").parse(highlighterParser, context); - } catch (Exception e) { - String sSource = "_na_"; - try { - sSource = source.toString(); - } catch (Throwable e1) { - // ignore - } - XContentLocation location = highlighterParser != null ? highlighterParser.getTokenLocation() : null; - throw new SearchParseException(context, "failed to parse suggest source [" + sSource + "]", location, e); + context.highlight(highlightBuilder.build(context.indexShard().getQueryShardContext())); + } catch (IOException e) { + throw new SearchContextException(context, "failed to create SearchContextHighlighter", e); } } if (source.innerHits() != null) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java index 32b5d7390d2..1efd4a7cd24 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.tophits; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; @@ -194,7 +193,7 @@ public class TopHitsBuilder extends AbstractAggregationBuilder { return sourceBuilder; } - public 
BytesReference highlighter() { + public HighlightBuilder highlighter() { return sourceBuilder().highlighter(); } diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 7963b678fb3..2428d1e5f7d 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -144,7 +144,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ private List aggregations; - private BytesReference highlightBuilder; + private HighlightBuilder highlightBuilder; private BytesReference suggestBuilder; @@ -405,22 +405,14 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ * Adds highlight to perform as part of the search. */ public SearchSourceBuilder highlighter(HighlightBuilder highlightBuilder) { - try { - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - highlightBuilder.innerXContent(builder); - builder.endObject(); - this.highlightBuilder = builder.bytes(); - return this; - } catch (IOException e) { - throw new RuntimeException(e); - } + this.highlightBuilder = highlightBuilder; + return this; } /** - * Gets the bytes representing the hightlighter builder for this request. + * Gets the hightlighter builder for this request. 
*/ - public BytesReference highlighter() { + public HighlightBuilder highlighter() { return highlightBuilder; } @@ -813,8 +805,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } builder.aggregations = aggregations; } else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - builder.highlightBuilder = xContentBuilder.bytes(); + builder.highlightBuilder = HighlightBuilder.fromXContent(context); } else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); builder.innerHitsBuilder = xContentBuilder.bytes(); @@ -1012,10 +1003,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } if (highlightBuilder != null) { - builder.field(HIGHLIGHT_FIELD.getPreferredName()); - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(highlightBuilder); - parser.nextToken(); - builder.copyCurrentStructure(parser); + this.highlightBuilder.toXContent(builder, params); } if (innerHitsBuilder != null) { @@ -1158,7 +1146,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } builder.from = in.readVInt(); if (in.readBoolean()) { - builder.highlightBuilder = in.readBytesReference(); + builder.highlightBuilder = HighlightBuilder.PROTOTYPE.readFrom(in); } boolean hasIndexBoost = in.readBoolean(); if (hasIndexBoost) { @@ -1259,7 +1247,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ boolean hasHighlightBuilder = highlightBuilder != null; out.writeBoolean(hasHighlightBuilder); if (hasHighlightBuilder) { - out.writeBytesReference(highlightBuilder); + highlightBuilder.writeTo(out); } boolean hasIndexBoost = indexBoost != null; out.writeBoolean(hasIndexBoost); diff --git 
a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java index 7941e177750..2e76a4c3703 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.fetch.innerhits; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -266,7 +265,7 @@ public class InnerHitsBuilder implements ToXContent { return this; } - public BytesReference highlighter() { + public HighlightBuilder highlighter() { return sourceBuilder().highlighter(); } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java index 4b627340324..c4c874d4754 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java @@ -308,9 +308,15 @@ public class HighlightBuilder extends AbstractHighlighterBuilder Date: Mon, 14 Dec 2015 16:09:20 +0100 Subject: [PATCH 049/167] apply feedback from @mikemccand --- .../org/elasticsearch/index/engine/InternalEngine.java | 4 ++-- .../index/translog/BufferingTranslogWriter.java | 4 ++-- .../elasticsearch/index/translog/TranslogWriter.java | 10 ++++------ .../elasticsearch/index/translog/TranslogTests.java | 6 +++--- 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index b5a62eca901..de13eb10977 100644 --- 
a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -781,9 +781,9 @@ public class InternalEngine extends Engine { // we need to fail the engine. it might have already been failed before // but we are double-checking it's failed and closed if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) { - failEngine("already closed by tragic event", indexWriter.getTragicException()); + failEngine("already closed by tragic event on the index writer", indexWriter.getTragicException()); } else if (translog.isOpen() == false && translog.getTragicException() != null) { - failEngine("already closed by tragic event", translog.getTragicException()); + failEngine("already closed by tragic event on the translog", translog.getTragicException()); } return true; } else if (t != null && diff --git a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java index 97c00ce0a02..a2eb0bff646 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java @@ -128,10 +128,10 @@ public final class BufferingTranslogWriter extends TranslogWriter { } // we can do this outside of the write lock but we have to protect from // concurrent syncs + ensureOpen(); // just for kicks - the checkpoint happens or not either way try { - ensureOpen(); // just for kicks - the checkpoint happens or not either way checkpoint(offsetToSync, opsCounter, channelReference); - } catch (IOException ex) { + } catch (Throwable ex) { closeWithTragicEvent(ex); throw ex; } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index d6f21ebb412..9ebe6041581 100644 --- 
a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -130,12 +130,10 @@ public class TranslogWriter extends TranslogReader { protected final void closeWithTragicEvent(Throwable throwable) throws IOException { try (ReleasableLock lock = writeLock.acquire()) { - if (throwable != null) { - if (tragedy == null) { - tragedy = throwable; - } else { - tragedy.addSuppressed(throwable); - } + if (tragedy == null) { + tragedy = throwable; + } else { + tragedy.addSuppressed(throwable); } close(); } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index ad4ac07f6d6..e35c04dcd6b 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -1310,7 +1310,7 @@ public class TranslogTests extends ESTestCase { super.write(src); src.position(pos); src.limit(limit); - throw new IOException("no space left on device"); + throw new IOException("__FAKE__ no space left on device"); } } return super.write(src); @@ -1334,7 +1334,7 @@ public class TranslogTests extends ESTestCase { } catch (IOException ex) { failed = true; assertFalse(translog.isOpen()); - assertEquals("no space left on device", ex.getMessage()); + assertEquals("__FAKE__ no space left on device", ex.getMessage()); } simulateDiskFull.set(randomBoolean()); } @@ -1345,7 +1345,7 @@ public class TranslogTests extends ESTestCase { fail("we are already closed"); } catch (AlreadyClosedException ex) { assertNotNull(ex.getCause()); - assertEquals(ex.getCause().getMessage(), "no space left on device"); + assertEquals(ex.getCause().getMessage(), "__FAKE__ no space left on device"); } } From 6f16317a4b37ee87dfdd34e54803e1ace72d0ce0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Mon, 14 Dec 2015 16:56:39 
+0100 Subject: [PATCH 050/167] Switch internal representation of order to enum --- .../search/builder/SearchSourceBuilder.java | 2 +- .../highlight/AbstractHighlighterBuilder.java | 36 +++++++++++++------ .../search/highlight/HighlightBuilder.java | 35 +++++++++++++++++- .../highlight/HighlightBuilderTests.java | 15 ++++++-- 4 files changed, 73 insertions(+), 15 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 2428d1e5f7d..465729ca714 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -805,7 +805,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } builder.aggregations = aggregations; } else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) { - builder.highlightBuilder = HighlightBuilder.fromXContent(context); + builder.highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); } else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); builder.innerHitsBuilder = xContentBuilder.bytes(); diff --git a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java index 12fb9874be2..b4de465cc74 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; import 
org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.search.highlight.HighlightBuilder.Order; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -78,7 +80,7 @@ public abstract class AbstractHighlighterBuilder highlightQuery; - protected String order; + protected Order order; protected Boolean highlightFilter; @@ -217,18 +219,26 @@ public abstract class AbstractHighlighterBuilderscore, which then it will be ordered - * by score of the fragments. + * by score of the fragments, or none. + */ + public HB order(String order) { + return order(Order.fromString(order)); + } + + /** + * By default, fragments of a field are ordered by the order in the highlighted text. + * If set to {@link Order#SCORE}, this changes order to score of the fragments. */ @SuppressWarnings("unchecked") - public HB order(String order) { - this.order = order; + public HB order(Order scoreOrdered) { + this.order = scoreOrdered; return (HB) this; } /** - * @return the value set by {@link #order(String)} + * @return the value set by {@link #order(Order)} */ - public String order() { + public Order order() { return this.order; } @@ -395,7 +405,7 @@ public abstract class AbstractHighlighterBuilder { + NONE, SCORE; + + static Order PROTOTYPE = NONE; + + @Override + public Order readFrom(StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown Order ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(this.ordinal()); + } + + public static Order fromString(String order) { + if (order.toUpperCase(Locale.ROOT).equals(SCORE.name())) { + return Order.SCORE; + } + return NONE; + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } } diff --git 
a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java index 19068d9506a..7b20ba587d7 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java @@ -53,6 +53,7 @@ import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.TermQueryParser; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.search.highlight.HighlightBuilder.Field; +import org.elasticsearch.search.highlight.HighlightBuilder.Order; import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -148,7 +149,6 @@ public class HighlightBuilderTests extends ESTestCase { context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { HighlightBuilder highlightBuilder = randomHighlighterBuilder(); - System.out.println(highlightBuilder); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); @@ -487,7 +487,12 @@ public class HighlightBuilderTests extends ESTestCase { highlightBuilder.highlightQuery(highlightQuery); } if (randomBoolean()) { - highlightBuilder.order(randomAsciiOfLengthBetween(1, 10)); + if (randomBoolean()) { + highlightBuilder.order(randomFrom(Order.values())); + } else { + // also test the string setter + highlightBuilder.order(randomFrom(Order.values()).toString()); + } } if (randomBoolean()) { highlightBuilder.highlightFilter(randomBoolean()); @@ -556,7 +561,11 @@ public class HighlightBuilderTests extends ESTestCase { highlightBuilder.highlightQuery(new TermQueryBuilder(randomAsciiOfLengthBetween(11, 20), randomAsciiOfLengthBetween(11, 
20))); break; case 8: - highlightBuilder.order(randomAsciiOfLengthBetween(11, 20)); + if (highlightBuilder.order() == Order.NONE) { + highlightBuilder.order(Order.SCORE); + } else { + highlightBuilder.order(Order.NONE); + } break; case 9: highlightBuilder.highlightFilter(toggleOrSet(highlightBuilder.highlightFilter())); From 59bd21f24f629894d8d3f4318f03413121f83535 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Mon, 14 Dec 2015 17:29:25 +0100 Subject: [PATCH 051/167] Adding test for Order enum ordinals --- .../highlight/HighlightBuilderTests.java | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java index 7b20ba587d7..2ac5895c9eb 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java @@ -416,6 +416,30 @@ public class HighlightBuilderTests extends ESTestCase { System.out.println(Math.log(1/(double)(1+1)) + 1.0); } + /** + * test ordinals of {@link Order}, since serialization depends on it + */ + public void testValidOrderOrdinals() { + assertThat(Order.NONE.ordinal(), equalTo(0)); + assertThat(Order.SCORE.ordinal(), equalTo(1)); + } + + public void testOrderSerialization() throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + Order.NONE.writeTo(out); + try (StreamInput in = StreamInput.wrap(out.bytes())) { + assertThat(in.readVInt(), equalTo(0)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + Order.SCORE.writeTo(out); + try (StreamInput in = StreamInput.wrap(out.bytes())) { + assertThat(in.readVInt(), equalTo(1)); + } + } + } + protected static XContentBuilder toXContent(HighlightBuilder highlight, XContentType contentType) throws IOException { XContentBuilder builder = 
XContentFactory.contentBuilder(contentType); if (randomBoolean()) { From 943915e05a45f9d541b500299c35365e8eaa5e70 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 14 Dec 2015 18:06:49 +0100 Subject: [PATCH 052/167] simplify code and use members directly --- .../java/org/elasticsearch/index/translog/Translog.java | 1 + .../org/elasticsearch/index/translog/TranslogWriter.java | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 3d6b76922a4..4016695dd89 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -542,6 +542,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public boolean ensureSynced(Location location) throws IOException { try (ReleasableLock lock = readLock.acquire()) { if (location.generation == current.generation) { // if we have a new one it's already synced + ensureOpen(); return current.syncUpTo(location.translogLocation + location.size); } } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 9ebe6041581..9870bddf871 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -173,10 +173,8 @@ public class TranslogWriter extends TranslogReader { if (syncNeeded()) { try (ReleasableLock lock = writeLock.acquire()) { ensureOpen(); - final long offset = writtenOffset; - final int opsCount = operationCounter; - checkpoint(offset, opsCount, channelReference); - lastSyncedOffset = offset; + checkpoint(writtenOffset, operationCounter, channelReference); + lastSyncedOffset = writtenOffset; } } } From 7bca97bba6ac012f875d758cc5f49546396b20e2 Mon 
Sep 17 00:00:00 2001 From: Costin Leau Date: Wed, 25 Nov 2015 01:04:40 +0200 Subject: [PATCH 053/167] HDFS Snapshot/Restore plugin Migrated from ES-Hadoop. Contains several improvements regarding: * Security Takes advantage of the pluggable security in ES 2.2 and uses that in order to grant the necessary permissions to the Hadoop libs. It relies on a dedicated DomainCombiner to grant permissions only when needed only to the libraries installed in the plugin folder Add security checks for SpecialPermission/scripting and provides out of the box permissions for the latest Hadoop 1.x (1.2.1) and 2.x (2.7.1) * Testing Uses a customized Local FS to perform actual integration testing of the Hadoop stack (and thus to make sure the proper permissions and ACC blocks are in place) however without requiring extra permissions for testing. If needed, a MiniDFS cluster is provided (though it requires extra permissions to bind ports) Provides a RestIT test * Build system Picks the build system used in ES (still Gradle) --- build.gradle | 1 + .../elasticsearch/plugins/PluginManager.java | 1 + .../elasticsearch/plugins/plugin-install.help | 1 + docs/plugins/repository-hdfs.asciidoc | 115 ++++++++ plugins/repository-hdfs/build.gradle | 203 ++++++++++++++ .../plugin/hadoop/hdfs/HdfsPlugin.java | 173 ++++++++++++ .../plugin/hadoop/hdfs/Utils.java | 84 ++++++ .../repositories/hdfs/FileSystemFactory.java | 28 ++ .../repositories/hdfs/FsCallback.java | 29 ++ .../repositories/hdfs/HdfsBlobContainer.java | 173 ++++++++++++ .../repositories/hdfs/HdfsBlobStore.java | 125 +++++++++ .../repositories/hdfs/HdfsRepository.java | 259 ++++++++++++++++++ .../repositories/hdfs/SecurityUtils.java | 73 +++++ .../repositories/hdfs/TestingFs.java | 57 ++++ .../plugin-metadata/plugin-security.policy | 67 +++++ .../resources/hadoop-libs/README.asciidoc | 1 + .../hadoop/hdfs/HdfsRepositoryRestIT.java | 30 ++ .../plugin/hadoop/hdfs/HdfsTestPlugin.java | 15 + .../plugin/hadoop/hdfs/HdfsTests.java | 218 
+++++++++++++++ .../plugin/hadoop/hdfs/MiniHDFSCluster.java | 48 ++++ .../plugin/hadoop/hdfs/UtilsTests.java | 11 + .../src/test/resources/additional-cfg.xml | 12 + .../src/test/resources/conf-2.xml | 12 + .../test/hdfs_repository/10_basic.yaml | 16 ++ .../hdfs_repository/20_repository.disabled | 25 ++ settings.gradle | 1 + 26 files changed, 1778 insertions(+) create mode 100644 docs/plugins/repository-hdfs.asciidoc create mode 100644 plugins/repository-hdfs/build.gradle create mode 100644 plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsPlugin.java create mode 100644 plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/Utils.java create mode 100644 plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FileSystemFactory.java create mode 100644 plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FsCallback.java create mode 100644 plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java create mode 100644 plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java create mode 100644 plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java create mode 100644 plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/SecurityUtils.java create mode 100644 plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/TestingFs.java create mode 100644 plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy create mode 100644 plugins/repository-hdfs/src/main/resources/hadoop-libs/README.asciidoc create mode 100644 plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsRepositoryRestIT.java create mode 100644 plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTestPlugin.java create mode 100644 
plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTests.java create mode 100644 plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/MiniHDFSCluster.java create mode 100644 plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/UtilsTests.java create mode 100644 plugins/repository-hdfs/src/test/resources/additional-cfg.xml create mode 100644 plugins/repository-hdfs/src/test/resources/conf-2.xml create mode 100644 plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/10_basic.yaml create mode 100644 plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository.disabled diff --git a/build.gradle b/build.gradle index 878f7a9a915..831db456a19 100644 --- a/build.gradle +++ b/build.gradle @@ -97,6 +97,7 @@ subprojects { // the "value" -quiet is added, separated by a space. This is ok since the javadoc // command already adds -quiet, so we are just duplicating it // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959 + javadoc.options.encoding='UTF8' javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet') } } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java index 0a01667f3c5..1ebe7813d3c 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java @@ -90,6 +90,7 @@ public class PluginManager { "mapper-murmur3", "mapper-size", "repository-azure", + "repository-hdfs", "repository-s3", "store-smb")); diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help index 2a4e6a6382c..8c73e3837a4 100644 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help +++ 
b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help @@ -50,6 +50,7 @@ OFFICIAL PLUGINS - mapper-murmur3 - mapper-size - repository-azure + - repository-hdfs - repository-s3 - store-smb diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc new file mode 100644 index 00000000000..ea13e5ad3a6 --- /dev/null +++ b/docs/plugins/repository-hdfs.asciidoc @@ -0,0 +1,115 @@ +[[repository-hdfs]] +=== Hadoop HDFS Repository Plugin + +The HDFS repository plugin adds support for using HDFS File System as a repository for +{ref}/modules-snapshots.html[Snapshot/Restore]. + +[[repository-hdfs-install]] +[float] +==== Installation + +This plugin can be installed using the plugin manager: + +[source,sh] +---------------------------------------------------------------- +sudo bin/plugin install repository-hdfs +sudo bin/plugin install repository-hdfs-hadoop2 +sudo bin/plugin install repository-hdfs-lite +---------------------------------------------------------------- + +The plugin must be installed on every node in the cluster, and each node must +be restarted after installation. + +[[repository-hdfs-remove]] +[float] +==== Removal + +The plugin can be removed with the following command: + +[source,sh] +---------------------------------------------------------------- +sudo bin/plugin remove repository-hdfs +sudo bin/plugin remove repository-hdfs-hadoop2 +sudo bin/plugin remove repository-hdfs-lite +---------------------------------------------------------------- + +The node must be stopped before removing the plugin. + +[[repository-hdfs-usage]] +==== Getting started with HDFS + +The HDFS snapshot/restore plugin comes in three _flavors_: + +* Default / Hadoop 1.x:: +The default version contains the plugin jar alongside Apache Hadoop 1.x (stable) dependencies. +* YARN / Hadoop 2.x:: +The `hadoop2` version contains the plugin jar plus the Apache Hadoop 2.x (also known as YARN) dependencies. 
+* Lite:: +The `lite` version contains just the plugin jar, without any Hadoop dependencies. The user should provide these (read below). + +[[repository-hdfs-flavor]] +===== What version to use? + +It depends on whether Hadoop is locally installed or not and if not, whether it is compatible with Apache Hadoop clients. + +* Are you using Apache Hadoop (or a _compatible_ distro) and do not have it installed on the Elasticsearch nodes?:: ++ +If the answer is yes, for Apache Hadoop 1 use the default `repository-hdfs` or `repository-hdfs-hadoop2` for Apache Hadoop 2. ++ +* If you have Hadoop installed locally on the Elasticsearch nodes or are using a certain distro:: ++ +Use the `lite` version and place your Hadoop _client_ jars and their dependencies in the plugin folder under `hadoop-libs`. +For large deployments, it is recommended to package the libraries in the plugin zip and deploy it manually across nodes +(and thus avoiding having to do the libraries setup on each node). + +[[repository-hdfs-security]] +==== Handling JVM Security and Permissions + +Out of the box, Elasticsearch runs in a JVM with the security manager turned _on_ to make sure that unsafe or sensitive actions +are allowed only from trusted code. Hadoop however is not really designed to run under one; it does not rely on privileged blocks +to execute sensitive code, of which it uses plenty. + +The `repository-hdfs` plugin provides the necessary permissions for both Apache Hadoop 1.x and 2.x (latest versions) to successfully +run in a secured JVM as one can tell from the number of permissions required when installing the plugin. +However using a certain Hadoop File-System (outside DFS), a certain distro or operating system (in particular Windows), might require +additional permissions which are not provided by the plugin.
+ +In this case there are several workarounds: +* add the permission into `plugin-security.policy` (available in the plugin folder) +* disable the security manager through `es.security.manager.enabled=false` configuration setting - NOT RECOMMENDED + +If you find yourself in such a situation, please let us know what Hadoop distro version and OS you are using and what permission is missing +by raising an issue. Thank you! + +[[repository-hdfs-config]] +==== Configuration Properties + +Once installed, define the configuration for the `hdfs` repository through `elasticsearch.yml` or the +{ref}/modules-snapshots.html[REST API]: + +[source] +---- +repositories + hdfs: + uri: "hdfs://:/" # optional - Hadoop file-system URI + path: "some/path" # required - path within the file-system where data is stored/loaded + load_defaults: "true" # optional - whether to load the default Hadoop configuration (default) or not + conf_location: "extra-cfg.xml" # optional - Hadoop configuration XML to be loaded (use commas for multi values) + conf. : "" # optional - 'inlined' key=value added to the Hadoop configuration + concurrent_streams: 5 # optional - the number of concurrent streams (defaults to 5) + compress: "false" # optional - whether to compress the metadata or not (default) + chunk_size: "10mb" # optional - chunk size (disabled by default) +---- + +NOTE: Be careful when including paths within the `uri` setting; some implementations ignore them completely while +others consider them. In general, we recommend keeping the `uri` to a minimum and using the `path` element instead. + +===== Plugging other file-systems + +Any HDFS-compatible file-systems (like Amazon `s3://` or Google `gs://`) can be used as long as the proper Hadoop +configuration is passed to the Elasticsearch plugin.
In practice, this means making sure the correct Hadoop configuration +files (`core-site.xml` and `hdfs-site.xml`) and its jars are available in plugin classpath, just as you would with any +other Hadoop client or job. + +Otherwise, the plugin will only read the _default_, vanilla configuration of Hadoop and will not be able to recognize +the plugged-in file-system. diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle new file mode 100644 index 00000000000..8f18f67f70d --- /dev/null +++ b/plugins/repository-hdfs/build.gradle @@ -0,0 +1,203 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +//apply plugin: 'nebula.provided-base' + +esplugin { + description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.'
+ classname 'org.elasticsearch.plugin.hadoop.hdfs.HdfsPlugin' +} + +configurations { + hadoop1 + hadoop2 +} + +versions << [ + 'hadoop1': '1.2.1', + 'hadoop2': '2.7.1' +] + +dependencies { + provided "org.elasticsearch:elasticsearch:${versions.elasticsearch}" + provided "org.apache.hadoop:hadoop-core:${versions.hadoop1}" + + // use Hadoop1 to compile and test things (a subset of Hadoop2) + testCompile "org.apache.hadoop:hadoop-core:${versions.hadoop1}" + testCompile "org.apache.hadoop:hadoop-test:${versions.hadoop1}" + // Hadoop dependencies + testCompile "commons-configuration:commons-configuration:1.6" + testCompile "commons-lang:commons-lang:${versions.commonslang}" + testCompile "commons-collections:commons-collections:3.2.2" + testCompile "commons-net:commons-net:1.4.1" + testCompile "org.mortbay.jetty:jetty:6.1.26" + testCompile "org.mortbay.jetty:jetty-util:6.1.26" + testCompile "org.mortbay.jetty:servlet-api:2.5-20081211" + testCompile "com.sun.jersey:jersey-core:1.8" + + + hadoop1("org.apache.hadoop:hadoop-core:${versions.hadoop1}") { + exclude module: "commons-cli" + exclude group: "com.sun.jersey" + exclude group: "org.mortbay.jetty" + exclude group: "tomcat" + exclude module: "commons-el" + exclude module: "hsqldb" + exclude group: "org.eclipse.jdt" + exclude module: "commons-beanutils" + exclude module: "commons-beanutils-core" + exclude module: "junit" + // provided by ES itself + exclude group: "log4j" + } + + hadoop2("org.apache.hadoop:hadoop-client:${versions.hadoop2}") { + exclude module: "commons-cli" + exclude group: "com.sun.jersey" + exclude group: "com.sun.jersey.contribs" + exclude group: "com.sun.jersey.jersey-test-framework" + exclude module: "guice" + exclude group: "org.mortbay.jetty" + exclude group: "tomcat" + exclude module: "commons-el" + exclude module: "hsqldb" + exclude group: "org.eclipse.jdt" + exclude module: "commons-beanutils" + exclude module: "commons-beanutils-core" + exclude module: "javax.servlet" + exclude module: 
"junit" + // provided by ES itself + exclude group: "log4j" + } + + hadoop2("org.apache.hadoop:hadoop-hdfs:${versions.hadoop2}") { + exclude module: "guava" + exclude module: "junit" + // provided by ES itself + exclude group: "log4j" + } +} + +configurations.all { + resolutionStrategy { + force "commons-codec:commons-codec:${versions.commonscodec}" + force "commons-logging:commons-logging:${versions.commonslogging}" + force "commons-lang:commons-lang:2.6" + force "commons-httpclient:commons-httpclient:3.0.1" + force "org.codehaus.jackson:jackson-core-asl:1.8.8" + force "org.codehaus.jackson:jackson-mapper-asl:1.8.8" + force "com.google.code.findbugs:jsr305:3.0.0" + force "com.google.guava:guava:16.0.1" + force "org.slf4j:slf4j-api:1.7.10" + force "org.slf4j:slf4j-log4j12:1.7.10" + } +} + + +dependencyLicenses { + mapping from: /hadoop-core.*/, to: 'hadoop-1' + mapping from: /hadoop-.*/, to: 'hadoop-2' +} + +compileJava.options.compilerArgs << '-Xlint:-deprecation,-rawtypes' + +// main jar includes just the plugin classes +jar { + include "org/elasticsearch/plugin/hadoop/hdfs/*" +} + +// hadoop jar (which actually depend on Hadoop) +task hadoopLinkedJar(type: Jar, dependsOn:jar) { + appendix "internal" + from sourceSets.main.output.classesDir + // exclude plugin + exclude "org/elasticsearch/plugin/hadoop/hdfs/*" +} + + +bundlePlugin.dependsOn hadoopLinkedJar + +// configure 'bundle' as being w/o Hadoop deps +bundlePlugin { + into ("internal-libs") { + from hadoopLinkedJar.archivePath + } + + into ("hadoop-libs") { + from configurations.hadoop2.allArtifacts.files + from configurations.hadoop2 + } +} + + +task distZipHadoop1(type: Zip, dependsOn: [hadoopLinkedJar, jar]) { zipTask -> + from (zipTree(bundlePlugin.archivePath)) { + include "*" + include "internal-libs/**" + } + + description = "Builds archive (with Hadoop1 dependencies) suitable for download page." 
+ classifier = "hadoop1" + + into ("hadoop-libs") { + from configurations.hadoop1.allArtifacts.files + from configurations.hadoop1 + } +} + +task distZipHadoop2(type: Zip, dependsOn: [hadoopLinkedJar, jar]) { zipTask -> + from (zipTree(bundlePlugin.archivePath)) { + include "*" + include "internal-libs/**" + } + + description = "Builds archive (with Hadoop2/YARN dependencies) suitable for download page." + classifier = "hadoop2" + + into ("hadoop-libs") { + from configurations.hadoop2.allArtifacts.files + from configurations.hadoop2 + } +} + +task distZipNoHadoop(type: Zip, dependsOn: [hadoopLinkedJar, jar]) { zipTask -> + from (zipTree(bundlePlugin.archivePath)) { + exclude "hadoop-libs/**" + } + + from sourceSets.main.output.resourcesDir + + description = "Builds archive (without any Hadoop dependencies) suitable for download page." + classifier = "lite" +} + + +artifacts { + archives bundlePlugin + 'default' bundlePlugin + archives distZipHadoop1 + archives distZipHadoop2 + archives distZipNoHadoop +} + +integTest { + cluster { + plugin(pluginProperties.extension.name, zipTree(distZipHadoop2.archivePath)) + } +} \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsPlugin.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsPlugin.java new file mode 100644 index 00000000000..9b65f7bec2f --- /dev/null +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsPlugin.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.plugin.hadoop.hdfs; + +import java.io.IOException; +import java.lang.reflect.Method; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLClassLoader; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoriesModule; +import org.elasticsearch.repositories.Repository; + +// +// Note this plugin is somewhat special as Hadoop itself loads a number of libraries and thus requires a number of permissions to run even in client mode. +// This poses two problems: +// - Hadoop itself comes with tons of jars, many providing the same classes across packages. In particular Hadoop 2 provides package annotations in the same +// package across jars which trips JarHell. Thus, to allow Hadoop jars to load, the plugin uses a dedicated CL which picks them up from the hadoop-libs folder. 
+// - The issue though with using a different CL is that it picks up the jars from a different location / codeBase and thus it does not fall under the plugin +// permissions. In other words, the plugin permissions don't apply to the hadoop libraries. +// There are different approaches here: +// - implement a custom classloader that loads the jars but 'lies' about the codesource. It is doable but since URLClassLoader is locked down, one would +// have to implement the whole jar opening and loading from it. Not impossible but still fairly low-level. +// Furthermore, even if the code has the proper credentials, it needs to use the proper Privileged blocks to use its full permissions which does not +// happen in the Hadoop code base. +// - use a different Policy. Works but the Policy is JVM wide and thus the code needs to be quite efficient - quite a bit of impact to cover just some plugin +// libraries +// - use a DomainCombiner. This doesn't change the semantics (it's clear where the code is loaded from, etc.) however it gives us a scoped, fine-grained +// callback on handling the permission intersection for secured calls. Note that DC works only in the current PAC call - the moment another PA is used, +// the domain combiner is going to be ignored (unless the caller specifically uses it). Due to its scoped impact and official Java support, this approach +// was used. + +// ClassLoading info +// - package plugin.hadoop.hdfs is part of the plugin +// - all the other packages are assumed to be in the nested Hadoop CL.
+ +// Code +public class HdfsPlugin extends Plugin { + + @Override + public String name() { + return "repository-hdfs"; + } + + @Override + public String description() { + return "HDFS Repository Plugin"; + } + + @SuppressWarnings("unchecked") + public void onModule(RepositoriesModule repositoriesModule) { + String baseLib = Utils.detectLibFolder(); + List cp = getHadoopClassLoaderPath(baseLib); + + ClassLoader hadoopCL = URLClassLoader.newInstance(cp.toArray(new URL[cp.size()]), getClass().getClassLoader()); + + Class repository = null; + try { + repository = (Class) hadoopCL.loadClass("org.elasticsearch.repositories.hdfs.HdfsRepository"); + } catch (ClassNotFoundException cnfe) { + throw new IllegalStateException("Cannot load plugin class; is the plugin class setup correctly?", cnfe); + } + + repositoriesModule.registerRepository("hdfs", repository, BlobStoreIndexShardRepository.class); + Loggers.getLogger(HdfsPlugin.class).info("Loaded Hadoop [{}] libraries from {}", getHadoopVersion(hadoopCL), baseLib); + } + + protected List getHadoopClassLoaderPath(String baseLib) { + List cp = new ArrayList<>(); + // add plugin internal jar + discoverJars(createURI(baseLib, "internal-libs"), cp, false); + // add Hadoop jars + discoverJars(createURI(baseLib, "hadoop-libs"), cp, true); + return cp; + } + + private String getHadoopVersion(ClassLoader hadoopCL) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + // unprivileged code such as scripts do not have SpecialPermission + sm.checkPermission(new SpecialPermission()); + } + + return AccessController.doPrivileged(new PrivilegedAction() { + @Override + public String run() { + // Hadoop 2 relies on TCCL to determine the version + ClassLoader tccl = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(hadoopCL); + return doGetHadoopVersion(hadoopCL); + } finally { + Thread.currentThread().setContextClassLoader(tccl); + } + } + }, Utils.hadoopACC()); + } 
+ + private String doGetHadoopVersion(ClassLoader hadoopCL) { + String version = "Unknown"; + + Class clz = null; + try { + clz = hadoopCL.loadClass("org.apache.hadoop.util.VersionInfo"); + } catch (ClassNotFoundException cnfe) { + // unknown + } + if (clz != null) { + try { + Method method = clz.getMethod("getVersion"); + version = method.invoke(null).toString(); + } catch (Exception ex) { + // class has changed, ignore + } + } + + return version; + } + + private URI createURI(String base, String suffix) { + String location = base + suffix; + try { + return new URI(location); + } catch (URISyntaxException ex) { + throw new IllegalStateException(String.format(Locale.ROOT, "Cannot detect plugin folder; [%s] seems invalid", location), ex); + } + } + + @SuppressForbidden(reason = "discover nested jar") + private void discoverJars(URI libPath, List cp, boolean optional) { + try { + Path[] jars = FileSystemUtils.files(PathUtils.get(libPath), "*.jar"); + + for (Path path : jars) { + cp.add(path.toUri().toURL()); + } + } catch (IOException ex) { + if (!optional) { + throw new IllegalStateException("Cannot compute plugin classpath", ex); + } + } + } +} \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/Utils.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/Utils.java new file mode 100644 index 00000000000..101025d029e --- /dev/null +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/Utils.java @@ -0,0 +1,84 @@ +package org.elasticsearch.plugin.hadoop.hdfs; + +import java.net.URL; +import java.security.AccessControlContext; +import java.security.AccessController; +import java.security.DomainCombiner; +import java.security.PrivilegedAction; +import java.security.ProtectionDomain; + +import org.elasticsearch.SpecialPermission; + +public abstract class Utils { + + protected static AccessControlContext hadoopACC() { + SecurityManager sm = 
System.getSecurityManager(); + if (sm != null) { + // unprivileged code such as scripts do not have SpecialPermission + sm.checkPermission(new SpecialPermission()); + } + + return AccessController.doPrivileged(new PrivilegedAction() { + @Override + public AccessControlContext run() { + return new AccessControlContext(AccessController.getContext(), new HadoopDomainCombiner()); + } + }); + } + + private static class HadoopDomainCombiner implements DomainCombiner { + + private static String BASE_LIB = detectLibFolder(); + + @Override + public ProtectionDomain[] combine(ProtectionDomain[] currentDomains, ProtectionDomain[] assignedDomains) { + for (ProtectionDomain pd : assignedDomains) { + if (pd.getCodeSource().getLocation().toString().startsWith(BASE_LIB)) { + return assignedDomains; + } + } + + return currentDomains; + } + } + + static String detectLibFolder() { + ClassLoader cl = Utils.class.getClassLoader(); + + // we could get the URL from the URLClassloader directly + // but that can create issues when running the tests from the IDE + // we could detect that by loading resources but that as well relies on + // the JAR URL + String classToLookFor = HdfsPlugin.class.getName().replace(".", "/").concat(".class"); + URL classURL = cl.getResource(classToLookFor); + if (classURL == null) { + throw new IllegalStateException("Cannot detect itself; something is wrong with this ClassLoader " + cl); + } + + String base = classURL.toString(); + + // extract root + // typically a JAR URL + int index = base.indexOf("!/"); + if (index > 0) { + base = base.substring(0, index); + // remove its prefix (jar:) + base = base.substring(4); + // remove the trailing jar + index = base.lastIndexOf("/"); + base = base.substring(0, index + 1); + } + // not a jar - something else, do a best effort here + else { + // remove the class searched + base = base.substring(0, base.length() - classToLookFor.length()); + } + + // append / + if (!base.endsWith("/")) { + base = base.concat("/"); + } + 
+ return base; + } +} \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FileSystemFactory.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FileSystemFactory.java new file mode 100644 index 00000000000..5e7c4d3fa57 --- /dev/null +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FileSystemFactory.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.hdfs; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; + +interface FileSystemFactory { + + FileSystem getFileSystem() throws IOException; +} diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FsCallback.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FsCallback.java new file mode 100644 index 00000000000..3eda2272149 --- /dev/null +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FsCallback.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.hdfs; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; + +interface FsCallback { + + V doInHdfs(FileSystem fs) throws IOException; +} diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java new file mode 100644 index 00000000000..f71ca7020a8 --- /dev/null +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.hdfs; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; +import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.Streams; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Locale; +import java.util.Map; + +public class HdfsBlobContainer extends AbstractBlobContainer { + + protected final HdfsBlobStore blobStore; + protected final Path path; + + public HdfsBlobContainer(BlobPath blobPath, HdfsBlobStore blobStore, Path path) { + super(blobPath); + this.blobStore = blobStore; + this.path = path; + } + + @Override + public boolean blobExists(String blobName) { + try { + return SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback() { + @Override + public Boolean doInHdfs(FileSystem fs) throws IOException { + return fs.exists(new Path(path, blobName)); + } + }); + } catch (Exception e) { + return false; + } + } + + @Override + public void deleteBlob(String blobName) throws IOException { + 
SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback() { + @Override + public Boolean doInHdfs(FileSystem fs) throws IOException { + return fs.delete(new Path(path, blobName), true); + } + }); + } + + @Override + public void move(String sourceBlobName, String targetBlobName) throws IOException { + boolean rename = SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback() { + @Override + public Boolean doInHdfs(FileSystem fs) throws IOException { + return fs.rename(new Path(path, sourceBlobName), new Path(path, targetBlobName)); + } + }); + + if (!rename) { + throw new IOException(String.format(Locale.ROOT, "can not move blob from [%s] to [%s]", sourceBlobName, targetBlobName)); + } + } + + @Override + public InputStream readBlob(String blobName) throws IOException { + // FSDataInputStream does buffering internally + return SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback() { + @Override + public InputStream doInHdfs(FileSystem fs) throws IOException { + return fs.open(new Path(path, blobName), blobStore.bufferSizeInBytes()); + } + }); + } + + @Override + public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { + SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback() { + @Override + public Void doInHdfs(FileSystem fs) throws IOException { + try (OutputStream stream = createOutput(blobName)) { + Streams.copy(inputStream, stream); + } + return null; + } + }); + } + + @Override + public void writeBlob(String blobName, BytesReference bytes) throws IOException { + SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback() { + @Override + public Void doInHdfs(FileSystem fs) throws IOException { + try (OutputStream stream = createOutput(blobName)) { + bytes.writeTo(stream); + } + return null; + } + }); + } + + private OutputStream createOutput(String blobName) throws IOException { + Path file = new Path(path, blobName); + // FSDataOutputStream does buffering 
internally + return blobStore.fileSystemFactory().getFileSystem().create(file, true, blobStore.bufferSizeInBytes()); + } + + @Override + public Map listBlobsByPrefix(final @Nullable String blobNamePrefix) throws IOException { + FileStatus[] files = SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback() { + @Override + public FileStatus[] doInHdfs(FileSystem fs) throws IOException { + return fs.listStatus(path, new PathFilter() { + @Override + public boolean accept(Path path) { + return path.getName().startsWith(blobNamePrefix); + } + }); + } + }); + if (files == null || files.length == 0) { + return Collections.emptyMap(); + } + Map map = new LinkedHashMap(); + for (FileStatus file : files) { + map.put(file.getPath().getName(), new PlainBlobMetaData(file.getPath().getName(), file.getLen())); + } + return Collections.unmodifiableMap(map); + } + + @Override + public Map listBlobs() throws IOException { + FileStatus[] files = SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback() { + @Override + public FileStatus[] doInHdfs(FileSystem fs) throws IOException { + return fs.listStatus(path); + } + }); + if (files == null || files.length == 0) { + return Collections.emptyMap(); + } + Map map = new LinkedHashMap(); + for (FileStatus file : files) { + map.put(file.getPath().getName(), new PlainBlobMetaData(file.getPath().getName(), file.getLen())); + } + return Collections.unmodifiableMap(map); + } +} \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java new file mode 100644 index 00000000000..b75485fa7fe --- /dev/null +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.hdfs; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.concurrent.Executor; + +public class HdfsBlobStore extends AbstractComponent implements BlobStore { + + private final FileSystemFactory ffs; + private final Path rootHdfsPath; + private final ThreadPool threadPool; + private final int bufferSizeInBytes; + + public HdfsBlobStore(Settings settings, FileSystemFactory ffs, Path path, ThreadPool threadPool) throws IOException { + super(settings); + this.ffs = ffs; + this.rootHdfsPath = path; + this.threadPool = threadPool; + + this.bufferSizeInBytes = (int) settings.getAsBytesSize("buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes(); + + mkdirs(path); + } + + 
private void mkdirs(Path path) throws IOException { + SecurityUtils.execute(ffs, new FsCallback() { + @Override + public Void doInHdfs(FileSystem fs) throws IOException { + if (!fs.exists(path)) { + fs.mkdirs(path); + } + return null; + } + }); + } + + @Override + public String toString() { + return rootHdfsPath.toUri().toString(); + } + + public FileSystemFactory fileSystemFactory() { + return ffs; + } + + public Path path() { + return rootHdfsPath; + } + + public Executor executor() { + return threadPool.executor(ThreadPool.Names.SNAPSHOT); + } + + public int bufferSizeInBytes() { + return bufferSizeInBytes; + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + return new HdfsBlobContainer(path, this, buildHdfsPath(path)); + } + + @Override + public void delete(BlobPath path) throws IOException { + SecurityUtils.execute(ffs, new FsCallback() { + @Override + public Void doInHdfs(FileSystem fs) throws IOException { + fs.delete(translateToHdfsPath(path), true); + return null; + } + }); + } + + private Path buildHdfsPath(BlobPath blobPath) { + final Path path = translateToHdfsPath(blobPath); + try { + mkdirs(path); + } catch (IOException ex) { + throw new ElasticsearchException("failed to create blob container", ex); + } + return path; + } + + private Path translateToHdfsPath(BlobPath blobPath) { + Path path = path(); + for (String p : blobPath) { + path = new Path(path, p); + } + return path; + } + + @Override + public void close() { + // + } +} \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java new file mode 100644 index 00000000000..11081445fd4 --- /dev/null +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -0,0 +1,259 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.hdfs; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URL; +import java.nio.file.Files; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.snapshots.IndexShardRepository; +import org.elasticsearch.repositories.RepositoryName; +import 
org.elasticsearch.repositories.RepositorySettings; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.threadpool.ThreadPool; + +public class HdfsRepository extends BlobStoreRepository implements FileSystemFactory { + + public final static String TYPE = "hdfs"; + + private final HdfsBlobStore blobStore; + private final BlobPath basePath; + private final ByteSizeValue chunkSize; + private final boolean compress; + private final RepositorySettings repositorySettings; + private FileSystem fs; + + @Inject + public HdfsRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, ThreadPool threadPool) throws IOException { + super(name.getName(), repositorySettings, indexShardRepository); + + this.repositorySettings = repositorySettings; + + String path = repositorySettings.settings().get("path", settings.get("path")); + if (path == null) { + throw new IllegalArgumentException("no 'path' defined for hdfs snapshot/restore"); + } + + // get configuration + fs = getFileSystem(); + Path hdfsPath = SecurityUtils.execute(fs, new FsCallback() { + @Override + public Path doInHdfs(FileSystem fs) throws IOException { + return fs.makeQualified(new Path(path)); + } + }); + this.basePath = BlobPath.cleanPath(); + + logger.debug("Using file-system [{}] for URI [{}], path [{}]", fs, fs.getUri(), hdfsPath); + blobStore = new HdfsBlobStore(settings, this, hdfsPath, threadPool); + this.chunkSize = repositorySettings.settings().getAsBytesSize("chunk_size", settings.getAsBytesSize("chunk_size", null)); + this.compress = repositorySettings.settings().getAsBoolean("compress", settings.getAsBoolean("compress", false)); + } + + // as the FileSystem is long-lived and might go away, make sure to check it before it's being used. 
+ @Override + public FileSystem getFileSystem() throws IOException { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + // unprivileged code such as scripts do not have SpecialPermission + sm.checkPermission(new SpecialPermission()); + } + + try { + return AccessController.doPrivileged(new PrivilegedExceptionAction() { + @Override + public FileSystem run() throws IOException { + return doGetFileSystem(); + } + }, SecurityUtils.AccBridge.acc()); + } catch (PrivilegedActionException pae) { + Throwable th = pae.getCause(); + if (th instanceof Error) { + throw (Error) th; + } + if (th instanceof RuntimeException) { + throw (RuntimeException) th; + } + if (th instanceof IOException) { + throw (IOException) th; + } + throw new ElasticsearchException(pae); + } + } + + private FileSystem doGetFileSystem() throws IOException { + // check if the fs is still alive + // make a cheap call that triggers little to no security checks + if (fs != null) { + try { + fs.isFile(fs.getWorkingDirectory()); + } catch (IOException ex) { + if (ex.getMessage().contains("Filesystem closed")) { + fs = null; + } + else { + throw ex; + } + } + } + if (fs == null) { + Thread th = Thread.currentThread(); + ClassLoader oldCL = th.getContextClassLoader(); + try { + th.setContextClassLoader(getClass().getClassLoader()); + return initFileSystem(repositorySettings); + } catch (IOException ex) { + throw ex; + } finally { + th.setContextClassLoader(oldCL); + } + } + return fs; + } + + private FileSystem initFileSystem(RepositorySettings repositorySettings) throws IOException { + + Configuration cfg = new Configuration(repositorySettings.settings().getAsBoolean("load_defaults", settings.getAsBoolean("load_defaults", true))); + cfg.setClassLoader(this.getClass().getClassLoader()); + cfg.reloadConfiguration(); + + String confLocation = repositorySettings.settings().get("conf_location", settings.get("conf_location")); + if (Strings.hasText(confLocation)) { + for (String entry : 
Strings.commaDelimitedListToStringArray(confLocation)) { + addConfigLocation(cfg, entry.trim()); + } + } + + Map map = repositorySettings.settings().getByPrefix("conf.").getAsMap(); + for (Entry entry : map.entrySet()) { + cfg.set(entry.getKey(), entry.getValue()); + } + + try { + UserGroupInformation.setConfiguration(cfg); + } catch (Throwable th) { + throw new ElasticsearchGenerationException(String.format(Locale.ROOT, "Cannot initialize Hadoop"), th); + } + + String uri = repositorySettings.settings().get("uri", settings.get("uri")); + URI actualUri = (uri != null ? URI.create(uri) : FileSystem.getDefaultUri(cfg)); + String user = repositorySettings.settings().get("user", settings.get("user")); + + try { + // disable FS cache + String disableFsCache = String.format(Locale.ROOT, "fs.%s.impl.disable.cache", actualUri.getScheme()); + cfg.setBoolean(disableFsCache, true); + + return (user != null ? FileSystem.get(actualUri, cfg, user) : FileSystem.get(actualUri, cfg)); + } catch (Exception ex) { + throw new ElasticsearchGenerationException(String.format(Locale.ROOT, "Cannot create Hdfs file-system for uri [%s]", actualUri), ex); + } + } + + @SuppressForbidden(reason = "pick up Hadoop config (which can be on HDFS)") + private void addConfigLocation(Configuration cfg, String confLocation) { + URL cfgURL = null; + // it's an URL + if (!confLocation.contains(":")) { + cfgURL = cfg.getClassLoader().getResource(confLocation); + + // fall back to file + if (cfgURL == null) { + java.nio.file.Path path = PathUtils.get(confLocation); + if (!Files.isReadable(path)) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, + "Cannot find classpath resource or file 'conf_location' [%s] defined for hdfs snapshot/restore", + confLocation)); + } + String pathLocation = path.toUri().toString(); + logger.debug("Adding path [{}] as file [{}]", confLocation, pathLocation); + confLocation = pathLocation; + } + else { + logger.debug("Resolving path [{}] to classpath [{}]", 
confLocation, cfgURL); + } + } + else { + logger.debug("Adding path [{}] as URL", confLocation); + } + + if (cfgURL == null) { + try { + cfgURL = new URL(confLocation); + } catch (MalformedURLException ex) { + throw new IllegalArgumentException(String.format(Locale.ROOT, + "Invalid 'conf_location' URL [%s] defined for hdfs snapshot/restore", confLocation), ex); + } + } + + cfg.addResource(cfgURL); + } + + @Override + protected BlobStore blobStore() { + return blobStore; + } + + @Override + protected BlobPath basePath() { + return basePath; + } + + @Override + protected boolean isCompress() { + return compress; + } + + @Override + protected ByteSizeValue chunkSize() { + return chunkSize; + } + + @Override + protected void doClose() throws ElasticsearchException { + super.doClose(); + + IOUtils.closeStream(fs); + fs = null; + } +} \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/SecurityUtils.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/SecurityUtils.java new file mode 100644 index 00000000000..6a0d4ffa818 --- /dev/null +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/SecurityUtils.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.hdfs; + +import java.io.IOException; +import java.security.AccessControlContext; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.fs.FileSystem; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.plugin.hadoop.hdfs.Utils; + +class SecurityUtils { + + abstract static class AccBridge extends Utils { + static AccessControlContext acc() { + return Utils.hadoopACC(); + } + } + + static V execute(FileSystemFactory ffs, FsCallback callback) throws IOException { + return execute(ffs.getFileSystem(), callback); + } + + static V execute(FileSystem fs, FsCallback callback) throws IOException { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + // unprivileged code such as scripts do not have SpecialPermission + sm.checkPermission(new SpecialPermission()); + } + + try { + return AccessController.doPrivileged(new PrivilegedExceptionAction() { + @Override + public V run() throws IOException { + return callback.doInHdfs(fs); + } + }, AccBridge.acc()); + } catch (PrivilegedActionException pae) { + Throwable th = pae.getCause(); + if (th instanceof Error) { + throw (Error) th; + } + if (th instanceof RuntimeException) { + throw (RuntimeException) th; + } + if (th instanceof IOException) { + throw (IOException) th; + } + throw new ElasticsearchException(pae); + } + } +} diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/TestingFs.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/TestingFs.java new file mode 100644 index 00000000000..46cb0a263fe --- /dev/null +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/TestingFs.java @@ -0,0 
+1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.hdfs; + +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RawLocalFileSystem; +import org.elasticsearch.common.SuppressForbidden; + +import java.io.File; +import java.io.IOException; + +/** + * Extends LFS to improve some operations to keep the security permissions at + * bay. In particular mkdir is smarter and doesn't have to walk all the file + * hierarchy but rather only limits itself to the parent/working dir and creates + * a file only when necessary. 
+ */ +public class TestingFs extends LocalFileSystem { + + private static class ImprovedRawLocalFileSystem extends RawLocalFileSystem { + @Override + @SuppressForbidden(reason = "the Hadoop API depends on java.io.File") + public boolean mkdirs(Path f) throws IOException { + File wd = pathToFile(getWorkingDirectory()); + File local = pathToFile(f); + if (wd.equals(local) || local.exists()) { + return true; + } + return mkdirs(f.getParent()) && local.mkdir(); + } + } + + public TestingFs() { + super(new ImprovedRawLocalFileSystem()); + // use the build path instead of the starting dir as that one has read permissions + //setWorkingDirectory(new Path(getClass().getProtectionDomain().getCodeSource().getLocation().toString())); + setWorkingDirectory(new Path(System.getProperty("java.io.tmpdir"))); + } +} diff --git a/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 00000000000..d26acd121e4 --- /dev/null +++ b/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +grant { + // used by the plugin to get the TCCL to properly initialize all of Hadoop components + permission java.lang.RuntimePermission "getClassLoader"; + + // used for DomainCombiner + permission java.security.SecurityPermission "createAccessControlContext"; + + // set TCCL used for bootstrapping Hadoop Configuration and JAAS + permission java.lang.RuntimePermission "setContextClassLoader"; + + // + // Hadoop 1 + // + + // UserGroupInformation (UGI) + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + + // UGI triggers JAAS + permission javax.security.auth.AuthPermission "getSubject"; + + // JAAS libraries are not loaded with the proper context in Hadoop, hence why the permission is needed here + permission java.lang.RuntimePermission "loadLibrary.jaas_nt"; + + // which triggers the use of the Kerberos library + permission java.lang.RuntimePermission "accessClassInPackage.sun.security.krb5"; + + // plus LoginContext + permission javax.security.auth.AuthPermission "modifyPrincipals"; + + permission javax.security.auth.AuthPermission "modifyPublicCredentials"; + + permission javax.security.auth.AuthPermission "modifyPrivateCredentials"; + + // + // Hadoop 2 + // + + // UGI (Ugi Metrics) + permission java.lang.RuntimePermission "accessDeclaredMembers"; + + // Shell initialization - reading system props + permission java.util.PropertyPermission "*", "read,write"; + + permission javax.security.auth.PrivateCredentialPermission "org.apache.hadoop.security.Credentials \"*\"", "read"; + + // HftpFileSystem (all present FS are loaded and initialized at startup ...) 
+ permission java.lang.RuntimePermission "setFactory"; +}; \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/resources/hadoop-libs/README.asciidoc b/plugins/repository-hdfs/src/main/resources/hadoop-libs/README.asciidoc new file mode 100644 index 00000000000..e9f85f3cdf7 --- /dev/null +++ b/plugins/repository-hdfs/src/main/resources/hadoop-libs/README.asciidoc @@ -0,0 +1 @@ +Folder containing the required Hadoop client libraries and dependencies. \ No newline at end of file diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsRepositoryRestIT.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsRepositoryRestIT.java new file mode 100644 index 00000000000..fd87e18cbce --- /dev/null +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsRepositoryRestIT.java @@ -0,0 +1,30 @@ +package org.elasticsearch.plugin.hadoop.hdfs; + +import java.io.IOException; +import java.util.Collection; + +import org.elasticsearch.plugin.hadoop.hdfs.HdfsPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.RestTestCandidate; +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +public class HdfsRepositoryRestIT extends ESRestTestCase { + + @Override + protected Collection> nodePlugins() { + return pluginList(HdfsPlugin.class); + } + + public HdfsRepositoryRestIT(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return ESRestTestCase.createParameters(0, 1); + } +} diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTestPlugin.java 
b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTestPlugin.java new file mode 100644 index 00000000000..4b4e2aa05ef --- /dev/null +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTestPlugin.java @@ -0,0 +1,15 @@ +package org.elasticsearch.plugin.hadoop.hdfs; + +import java.net.URL; +import java.util.Collections; +import java.util.List; + +import org.elasticsearch.plugin.hadoop.hdfs.HdfsPlugin; + +public class HdfsTestPlugin extends HdfsPlugin { + + @Override + protected List getHadoopClassLoaderPath(String baseLib) { + return Collections.emptyList(); + } +} diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTests.java new file mode 100644 index 00000000000..d1b23e92538 --- /dev/null +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTests.java @@ -0,0 +1,218 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.plugin.hadoop.hdfs; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +import java.util.Collection; + +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.repositories.hdfs.TestingFs; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.ESIntegTestCase.ThirdParty; +import org.elasticsearch.test.store.MockFSDirectoryService; +import org.junit.After; +import org.junit.Before; + +/** + * You must specify {@code -Dtests.thirdparty=true} + */ +@ThirdParty +@ClusterScope(scope = Scope.SUITE, numDataNodes = 1, transportClientRatio = 0.0) +public class HdfsTests extends ESIntegTestCase { + + @Override + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) + .put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE, false) + .build(); + } + + @Override + protected Settings nodeSettings(int ordinal) { + Settings.Builder settings = Settings.builder() + .put(super.nodeSettings(ordinal)) + .put("path.home", createTempDir()) + .put("path.repo", "") + .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) + .put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE, 
false); + return settings.build(); + } + + @Override + protected Collection> nodePlugins() { + return pluginList(HdfsTestPlugin.class); + } + + private String path; + + @Before + public final void wipeBefore() throws Exception { + wipeRepositories(); + path = "build/data/repo-" + randomInt(); + } + + @After + public final void wipeAfter() throws Exception { + wipeRepositories(); + } + + public void testSimpleWorkflow() { + Client client = client(); + logger.info("--> creating hdfs repository with path [{}]", path); + + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") + .setType("hdfs") + .setSettings(Settings.settingsBuilder() + //.put("uri", "hdfs://127.0.0.1:51227") + .put("conf.fs.es-hdfs.impl", TestingFs.class.getName()) + .put("uri", "es-hdfs://./build/") + .put("path", path) + .put("conf", "additional-cfg.xml, conf-2.xml") + .put("chunk_size", randomIntBetween(100, 1000) + "k") + .put("compress", randomBoolean()) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + + createIndex("test-idx-1", "test-idx-2", "test-idx-3"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); + index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); + index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i); + } + refresh(); + assertThat(count(client, "test-idx-1"), equalTo(100L)); + assertThat(count(client, "test-idx-2"), equalTo(100L)); + assertThat(count(client, "test-idx-3"), equalTo(100L)); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + 
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); + + logger.info("--> delete some data"); + for (int i = 0; i < 50; i++) { + client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get(); + } + for (int i = 50; i < 100; i++) { + client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get(); + } + for (int i = 0; i < 100; i += 2) { + client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get(); + } + refresh(); + assertThat(count(client, "test-idx-1"), equalTo(50L)); + assertThat(count(client, "test-idx-2"), equalTo(50L)); + assertThat(count(client, "test-idx-3"), equalTo(50L)); + + logger.info("--> close indices"); + client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get(); + + logger.info("--> restore all indices from the snapshot"); + RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + + ensureGreen(); + assertThat(count(client, "test-idx-1"), equalTo(100L)); + assertThat(count(client, "test-idx-2"), equalTo(100L)); + assertThat(count(client, "test-idx-3"), equalTo(50L)); + + // Test restore after index deletion + logger.info("--> delete indices"); + wipeIndices("test-idx-1", "test-idx-2"); + logger.info("--> restore one index after deletion"); + restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + ensureGreen(); + assertThat(count(client, 
"test-idx-1"), equalTo(100L)); + ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true)); + assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false)); + } + + private void wipeIndices(String... indices) { + cluster().wipeIndices(indices); + } + + // RepositoryVerificationException.class + public void testWrongPath() { + Client client = client(); + logger.info("--> creating hdfs repository with path [{}]", path); + + try { + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") + .setType("hdfs") + .setSettings(Settings.settingsBuilder() + // .put("uri", "hdfs://127.0.0.1:51227/") + .put("conf.fs.es-hdfs.impl", TestingFs.class.getName()) + .put("uri", "es-hdfs:///") + .put("path", path + "a@b$c#11:22") + .put("chunk_size", randomIntBetween(100, 1000) + "k") + .put("compress", randomBoolean())) + .get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + + createIndex("test-idx-1", "test-idx-2", "test-idx-3"); + ensureGreen(); + fail("Path name is invalid"); + } catch (RepositoryException re) { + // expected + } + } + + /** + * Deletes repositories, supports wildcard notation. + */ + public static void wipeRepositories(String... 
repositories) { + // if nothing is provided, delete all + if (repositories.length == 0) { + repositories = new String[]{"*"}; + } + for (String repository : repositories) { + try { + client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + } catch (RepositoryMissingException ex) { + // ignore + } + } + } + + private long count(Client client, String index) { + return client.prepareSearch(index).setSize(0).get().getHits().totalHits(); + } +} \ No newline at end of file diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/MiniHDFSCluster.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/MiniHDFSCluster.java new file mode 100644 index 00000000000..0d700615a1a --- /dev/null +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/MiniHDFSCluster.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.plugin.hadoop.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.elasticsearch.common.SuppressForbidden; + +import java.io.File; + +public class MiniHDFSCluster { + + @SuppressForbidden(reason = "Hadoop is messy") + public static void main(String[] args) throws Exception { + FileUtil.fullyDelete(new File(System.getProperty("test.build.data", "build/test/data"), "dfs/")); + // MiniHadoopClusterManager.main(new String[] { "-nomr" }); + Configuration cfg = new Configuration(); + cfg.set(DataNode.DATA_DIR_PERMISSION_KEY, "666"); + cfg.set("dfs.replication", "0"); + MiniDFSCluster dfsCluster = new MiniDFSCluster(cfg, 1, true, null); + FileSystem fs = dfsCluster.getFileSystem(); + System.out.println(fs.getClass()); + System.out.println(fs.getUri()); + System.out.println(dfsCluster.getHftpFileSystem().getClass()); + + // dfsCluster.shutdown(); + } +} diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/UtilsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/UtilsTests.java new file mode 100644 index 00000000000..2f492eee343 --- /dev/null +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/UtilsTests.java @@ -0,0 +1,11 @@ +package org.elasticsearch.plugin.hadoop.hdfs; + +import org.elasticsearch.test.ESTestCase; + +public class UtilsTests extends ESTestCase { + + public void testDetectLibFolder() { + String location = HdfsPlugin.class.getProtectionDomain().getCodeSource().getLocation().toString(); + assertEquals(location, Utils.detectLibFolder()); + } +} diff --git a/plugins/repository-hdfs/src/test/resources/additional-cfg.xml b/plugins/repository-hdfs/src/test/resources/additional-cfg.xml new file mode 100644 index 00000000000..b1b6611e924 --- 
/dev/null +++ b/plugins/repository-hdfs/src/test/resources/additional-cfg.xml @@ -0,0 +1,12 @@ + + + + + foo + foo + + + paradise + lost + + diff --git a/plugins/repository-hdfs/src/test/resources/conf-2.xml b/plugins/repository-hdfs/src/test/resources/conf-2.xml new file mode 100644 index 00000000000..b1b6611e924 --- /dev/null +++ b/plugins/repository-hdfs/src/test/resources/conf-2.xml @@ -0,0 +1,12 @@ + + + + + foo + foo + + + paradise + lost + + diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/10_basic.yaml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/10_basic.yaml new file mode 100644 index 00000000000..b7bc644a832 --- /dev/null +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/10_basic.yaml @@ -0,0 +1,16 @@ +# Integration tests for HDFS Repository plugin +# +# Check plugin is installed +# +"HDFS Repository loaded": + - do: + cluster.state: {} + + # Get master node id + - set: { master_node: master } + + - do: + nodes.info: {} + + - match: { nodes.$master.plugins.0.name: repository-hdfs } + - match: { nodes.$master.plugins.0.jvm: true } diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository.disabled b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository.disabled new file mode 100644 index 00000000000..f1f5f7a65e0 --- /dev/null +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository.disabled @@ -0,0 +1,25 @@ +# Integration tests for HDFS Repository plugin +# +# Check plugin is installed +# +"HDFS Repository Config": + - do: + snapshot.create_repository: + repository: test_repo_hdfs_1 + verify: false + body: + type: hdfs + settings: + # local HDFS implementation + conf.fs.es-hdfs.impl: "org.elasticsearch.repositories.hdfs.TestingFs" + uri: "es-hdfs://./build/" + path: "build/data/repo-hdfs" + + # Get repositry + - do: + 
snapshot.get_repository: + repository: test_repo_hdfs_1 + + - is_true: test_repo_hdfs_1 + - is_true: test_repo_hdfs_1.settings.uri + - match: {test_repo_hdfs_1.settings.path : "build/data/repo-hdfs"} diff --git a/settings.gradle b/settings.gradle index e928e53b690..e9fb0a043aa 100644 --- a/settings.gradle +++ b/settings.gradle @@ -29,6 +29,7 @@ List projects = [ 'plugins:mapper-murmur3', 'plugins:mapper-size', 'plugins:repository-azure', + 'plugins:repository-hdfs', 'plugins:repository-s3', 'plugins:jvm-example', 'plugins:site-example', From 6b39ff608122caa66089f98aa30c89461378e7d3 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 23 Nov 2015 14:56:50 -0500 Subject: [PATCH 054/167] Only trace log shard not available exceptions This commit changes the behavior of the logging in TransportBroadcastByNodeAction#onNodeFailure to only trace log exceptions that are considered shard-not-available exceptions. This makes the logging consistent with how these exceptions are handled in the response. 
Relates #14927 --- .../node/TransportBroadcastByNodeAction.java | 31 ++++++++++++++++--- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index bc78f13433f..4799eed9154 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -19,8 +19,16 @@ package org.elasticsearch.action.support.broadcast.node; -import org.elasticsearch.action.*; -import org.elasticsearch.action.support.*; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; @@ -37,7 +45,14 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.NodeShouldNotConnectException; 
+import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; @@ -384,7 +399,15 @@ public abstract class TransportBroadcastByNodeAction Date: Mon, 14 Dec 2015 17:20:22 -0500 Subject: [PATCH 055/167] Explicitly log cluster state update failures This commit adds explicit logging at the DEBUG level for cluster state update failures. Currently this responsibility is left to the cluster state task listener, but we should expliclty log these with a generic message to address cases where the listener might not. Relates #14899, relates #15016, relates #15023 --- .../service/InternalClusterService.java | 48 +++++++++++++++++-- 1 file changed, 43 insertions(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index d4b15861846..afdfea65328 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -20,8 +20,19 @@ package org.elasticsearch.cluster.service; import org.elasticsearch.Version; -import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.AckedClusterStateTaskListener; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState.Builder; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import 
org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.TimeoutClusterStateListener; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; @@ -42,7 +53,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.*; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; +import org.elasticsearch.common.util.concurrent.PrioritizedRunnable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryService; @@ -50,8 +67,19 @@ import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.*; -import java.util.concurrent.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Executor; +import java.util.concurrent.Future; +import 
java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; @@ -292,6 +320,7 @@ public class InternalClusterService extends AbstractLifecycleComponent threadPool.generic().execute(() -> { if (updateTask.processed.getAndSet(true) == false) { + logger.debug("cluster state update task [{}] timed out after [{}]", source, config.timeout()); listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source)); }})); } else { @@ -413,6 +442,9 @@ public class InternalClusterService extends AbstractLifecycleComponent finalBatchResult = batchResult; + assert toExecute.stream().map(updateTask -> updateTask.task).allMatch(finalBatchResult.executionResults::containsKey); ClusterState newClusterState = batchResult.resultingState; final ArrayList> proccessedListeners = new ArrayList<>(); @@ -421,7 +453,13 @@ public class InternalClusterService extends AbstractLifecycleComponent proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex)); + executionResult.handle( + () -> proccessedListeners.add(updateTask), + ex -> { + logger.debug("cluster state update task [{}] failed", ex, updateTask.source); + updateTask.listener.onFailure(updateTask.source, ex); + } + ); } if (previousClusterState == newClusterState) { From 2e721a0328a1741a9080dc3eb625255a0cc320b1 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 14 Dec 2015 18:54:35 -0500 Subject: [PATCH 056/167] Fix IntelliJ query builder type inference issues This commit addresses two type inference issues that the IntelliJ source editor struggles with when registering query builder prototypes in o/e/i/q/IndicesQueriesRegistry.java and o/e/i/q/f/ScoreFunctionParserMapper.java. 
--- .../query/functionscore/ScoreFunctionParserMapper.java | 9 ++++----- .../indices/query/IndicesQueriesRegistry.java | 8 +++++--- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java index c528c0007f2..e7ce9b90e2b 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java @@ -19,13 +19,11 @@ package org.elasticsearch.index.query.functionscore; -import java.util.Map; - import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.XContentLocation; -import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionParser; import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionParser; import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser; @@ -74,11 +72,12 @@ public class ScoreFunctionParserMapper { return functionParsers.get(parserName); } - private static void addParser(ScoreFunctionParser scoreFunctionParser, Map> map, NamedWriteableRegistry namedWriteableRegistry) { + private static void addParser(ScoreFunctionParser scoreFunctionParser, Map> map, NamedWriteableRegistry namedWriteableRegistry) { for (String name : scoreFunctionParser.getNames()) { map.put(name, scoreFunctionParser); } - namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, scoreFunctionParser.getBuilderPrototype()); + @SuppressWarnings("unchecked") NamedWriteable sfb = scoreFunctionParser.getBuilderPrototype(); + 
namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, sfb); } } diff --git a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java index 0cec415d63b..08b7b34e91a 100644 --- a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java +++ b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.query; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.EmptyQueryBuilder; @@ -40,11 +41,12 @@ public class IndicesQueriesRegistry extends AbstractComponent { public IndicesQueriesRegistry(Settings settings, Set injectedQueryParsers, NamedWriteableRegistry namedWriteableRegistry) { super(settings); Map> queryParsers = new HashMap<>(); - for (QueryParser queryParser : injectedQueryParsers) { + for (@SuppressWarnings("unchecked") QueryParser queryParser : injectedQueryParsers) { for (String name : queryParser.names()) { queryParsers.put(name, queryParser); } - namedWriteableRegistry.registerPrototype(QueryBuilder.class, queryParser.getBuilderPrototype()); + @SuppressWarnings("unchecked") NamedWriteable qb = queryParser.getBuilderPrototype(); + namedWriteableRegistry.registerPrototype(QueryBuilder.class, qb); } // EmptyQueryBuilder is not registered as query parser but used internally. 
// We need to register it with the NamedWriteableRegistry in order to serialize it @@ -58,4 +60,4 @@ public class IndicesQueriesRegistry extends AbstractComponent { public Map> queryParsers() { return queryParsers; } -} \ No newline at end of file +} From 2d42e99c7acd79817ac95beb92bbd2a509e5016d Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 15 Dec 2015 00:05:14 -0500 Subject: [PATCH 057/167] smoke test plugins does not test any plugins Currently the build has a bug and it loads 0 plugins. --- qa/smoke-test-plugins/build.gradle | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index 70611aed371..9d8e3950a83 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -22,15 +22,16 @@ import org.elasticsearch.gradle.MavenFilteringHack apply plugin: 'elasticsearch.rest-test' ext.pluginsCount = 0 -project.rootProject.subprojects.findAll { it.path.startsWith(':projects:') }.each { subproj -> +project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each { subproj -> integTest { cluster { // need to get a non-decorated project object, so must re-lookup the project by path plugin subproj.name, project(subproj.path) } } - pluginCount += 1 + pluginsCount += 1 } +assert pluginsCount > 0 ext.expansions = [ 'expected.plugins.count': pluginsCount From 60d35c81af8662c5771854e7e4fbc4a3dd3d78c1 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 14 Dec 2015 15:33:08 -0800 Subject: [PATCH 058/167] Plugins: Expose http.type setting, and collapse al(most all) modules relating to transport/http This change adds back the http.type setting. It also cleans up all the transport related guice code to be consolidated within the NetworkModule (as transport and http related stuff is what and how ES exposes over the network). 
The setter methods previously used by some plugins to override eg the TransportService or HttpServerTransport are removed, and those plugins should now register a custom implementation of the class with a name and set that using the appropriate config setting. Note that I think ActionModule should also be moved into here, to sit along side the rest actions, but I left that for a followup. closes #14148 --- .../transport/ClientTransportModule.java | 37 -- .../client/transport/TransportClient.java | 11 +- .../common/network/NetworkModule.java | 347 +++++++++++++++++- .../elasticsearch/http/HttpServerModule.java | 59 --- .../java/org/elasticsearch/node/Node.java | 17 +- .../org/elasticsearch/rest/RestModule.java | 51 --- .../rest/action/RestActionModule.java | 273 -------------- .../transport/TransportModule.java | 122 ------ .../TransportClientHeadersTests.java | 8 +- .../common/inject/ModuleTestCase.java | 18 + .../common/network/NetworkModuleTests.java | 176 +++++++++ .../plugins/PluggableTransportModuleIT.java | 112 ------ .../TestResponseHeaderPlugin.java | 6 +- .../transport/TransportModuleTests.java | 48 +++ .../transport/netty/NettyTransportIT.java | 10 +- .../NettyTransportMultiPortIntegrationIT.java | 8 +- .../netty/NettyTransportPublishAddressIT.java | 7 +- .../messy/tests/IndicesRequestTests.java | 29 +- .../deletebyquery/DeleteByQueryPlugin.java | 10 +- .../test/ESBackcompatTestCase.java | 10 +- .../org/elasticsearch/test/ExternalNode.java | 6 +- .../transport/AssertingLocalTransport.java | 8 +- .../test/transport/MockTransportService.java | 8 +- 23 files changed, 648 insertions(+), 733 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java delete mode 100644 core/src/main/java/org/elasticsearch/http/HttpServerModule.java delete mode 100644 core/src/main/java/org/elasticsearch/rest/RestModule.java delete mode 100644 core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java delete mode 
100644 core/src/main/java/org/elasticsearch/transport/TransportModule.java create mode 100644 core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java delete mode 100644 core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java create mode 100644 core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java diff --git a/core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java b/core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java deleted file mode 100644 index 895b3d844f6..00000000000 --- a/core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.client.support.Headers; -import org.elasticsearch.client.transport.support.TransportProxyClient; -import org.elasticsearch.common.inject.AbstractModule; - -/** - * - */ -public class ClientTransportModule extends AbstractModule { - - @Override - protected void configure() { - bind(Headers.class).asEagerSingleton(); - bind(TransportProxyClient.class).asEagerSingleton(); - bind(TransportClientNodesService.class).asEagerSingleton(); - } -} diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 33cf3479419..3b8be668f43 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -32,7 +32,6 @@ import org.elasticsearch.client.support.Headers; import org.elasticsearch.client.transport.support.TransportProxyClient; import org.elasticsearch.cluster.ClusterNameModule; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.Module; @@ -43,19 +42,15 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.EnvironmentModule; import org.elasticsearch.indices.breaker.CircuitBreakerModule; import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsModule; 
import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.search.SearchModule; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.netty.NettyTransport; @@ -69,7 +64,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; * The transport client allows to create a client that is not part of the cluster, but simply connects to one * or more nodes directly by adding their respective addresses using {@link #addTransportAddress(org.elasticsearch.common.transport.TransportAddress)}. *

- * The transport client important modules used is the {@link org.elasticsearch.transport.TransportModule} which is + * The transport client important modules used is the {@link org.elasticsearch.common.network.NetworkModule} which is * started in client mode (only connects, no bind). */ public class TransportClient extends AbstractClient { @@ -143,10 +138,9 @@ public class TransportClient extends AbstractClient { } modules.add(new PluginsModule(pluginsService)); modules.add(new SettingsModule(this.settings, settingsFilter )); - modules.add(new NetworkModule(networkService)); + modules.add(new NetworkModule(networkService, this.settings, true)); modules.add(new ClusterNameModule(this.settings)); modules.add(new ThreadPoolModule(threadPool)); - modules.add(new TransportModule(this.settings)); modules.add(new SearchModule() { @Override protected void configure() { @@ -154,7 +148,6 @@ public class TransportClient extends AbstractClient { } }); modules.add(new ActionModule(true)); - modules.add(new ClientTransportModule()); modules.add(new CircuitBreakerModule(this.settings)); pluginsService.processModules(modules); diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index c1f282ac234..f7eab3da2ac 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -19,21 +19,362 @@ package org.elasticsearch.common.network; +import org.elasticsearch.client.support.Headers; +import org.elasticsearch.client.transport.TransportClientNodesService; +import org.elasticsearch.client.transport.support.TransportProxyClient; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import 
org.elasticsearch.common.util.ExtensionPoint; +import org.elasticsearch.http.HttpServer; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.netty.NettyHttpServerTransport; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction; +import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction; +import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction; +import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction; +import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction; +import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction; +import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction; +import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction; +import 
org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction; +import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction; +import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetIndicesAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction; +import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction; +import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction; +import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction; +import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction; +import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction; +import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction; +import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction; +import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction; +import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction; +import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction; +import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction; +import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction; +import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction; +import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction; +import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction; +import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction; +import 
org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; +import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction; +import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction; +import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction; +import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction; +import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction; +import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction; +import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction; +import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction; +import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction; +import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction; +import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction; +import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction; +import org.elasticsearch.rest.action.bulk.RestBulkAction; +import org.elasticsearch.rest.action.cat.AbstractCatAction; +import org.elasticsearch.rest.action.cat.RestAliasAction; +import org.elasticsearch.rest.action.cat.RestAllocationAction; +import org.elasticsearch.rest.action.cat.RestCatAction; +import org.elasticsearch.rest.action.cat.RestFielddataAction; +import org.elasticsearch.rest.action.cat.RestHealthAction; +import org.elasticsearch.rest.action.cat.RestIndicesAction; +import 
org.elasticsearch.rest.action.cat.RestMasterAction; +import org.elasticsearch.rest.action.cat.RestNodeAttrsAction; +import org.elasticsearch.rest.action.cat.RestNodesAction; +import org.elasticsearch.rest.action.cat.RestPluginsAction; +import org.elasticsearch.rest.action.cat.RestRepositoriesAction; +import org.elasticsearch.rest.action.cat.RestSegmentsAction; +import org.elasticsearch.rest.action.cat.RestShardsAction; +import org.elasticsearch.rest.action.cat.RestSnapshotAction; +import org.elasticsearch.rest.action.cat.RestThreadPoolAction; +import org.elasticsearch.rest.action.delete.RestDeleteAction; +import org.elasticsearch.rest.action.explain.RestExplainAction; +import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction; +import org.elasticsearch.rest.action.get.RestGetAction; +import org.elasticsearch.rest.action.get.RestGetSourceAction; +import org.elasticsearch.rest.action.get.RestHeadAction; +import org.elasticsearch.rest.action.get.RestMultiGetAction; +import org.elasticsearch.rest.action.index.RestIndexAction; +import org.elasticsearch.rest.action.main.RestMainAction; +import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction; +import org.elasticsearch.rest.action.percolate.RestPercolateAction; +import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction; +import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction; +import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction; +import org.elasticsearch.rest.action.search.RestClearScrollAction; +import org.elasticsearch.rest.action.search.RestMultiSearchAction; +import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.rest.action.search.RestSearchScrollAction; +import org.elasticsearch.rest.action.suggest.RestSuggestAction; +import org.elasticsearch.rest.action.template.RestDeleteSearchTemplateAction; +import org.elasticsearch.rest.action.template.RestGetSearchTemplateAction; +import 
org.elasticsearch.rest.action.template.RestPutSearchTemplateAction; +import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction; +import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction; +import org.elasticsearch.rest.action.update.RestUpdateAction; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.local.LocalTransport; +import org.elasticsearch.transport.netty.NettyTransport; + +import java.util.Arrays; +import java.util.List; /** - * + * A module to handle registering and binding all network related classes. */ public class NetworkModule extends AbstractModule { - private final NetworkService networkService; + public static final String TRANSPORT_TYPE_KEY = "transport.type"; + public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type"; - public NetworkModule(NetworkService networkService) { + public static final String LOCAL_TRANSPORT = "local"; + public static final String NETTY_TRANSPORT = "netty"; + + public static final String HTTP_TYPE_KEY = "http.type"; + public static final String HTTP_ENABLED = "http.enabled"; + + private static final List> builtinRestHandlers = Arrays.asList( + RestMainAction.class, + + RestNodesInfoAction.class, + RestNodesStatsAction.class, + RestNodesHotThreadsAction.class, + RestClusterStatsAction.class, + RestClusterStateAction.class, + RestClusterHealthAction.class, + RestClusterUpdateSettingsAction.class, + RestClusterGetSettingsAction.class, + RestClusterRerouteAction.class, + RestClusterSearchShardsAction.class, + RestPendingClusterTasksAction.class, + RestPutRepositoryAction.class, + RestGetRepositoriesAction.class, + RestDeleteRepositoryAction.class, + RestVerifyRepositoryAction.class, + RestGetSnapshotsAction.class, + RestCreateSnapshotAction.class, + RestRestoreSnapshotAction.class, + RestDeleteSnapshotAction.class, + RestSnapshotsStatusAction.class, + + RestIndicesExistsAction.class, + 
RestTypesExistsAction.class, + RestGetIndicesAction.class, + RestIndicesStatsAction.class, + RestIndicesSegmentsAction.class, + RestIndicesShardStoresAction.class, + RestGetAliasesAction.class, + RestAliasesExistAction.class, + RestIndexDeleteAliasesAction.class, + RestIndexPutAliasAction.class, + RestIndicesAliasesAction.class, + RestGetIndicesAliasesAction.class, + RestCreateIndexAction.class, + RestDeleteIndexAction.class, + RestCloseIndexAction.class, + RestOpenIndexAction.class, + + RestUpdateSettingsAction.class, + RestGetSettingsAction.class, + + RestAnalyzeAction.class, + RestGetIndexTemplateAction.class, + RestPutIndexTemplateAction.class, + RestDeleteIndexTemplateAction.class, + RestHeadIndexTemplateAction.class, + + RestPutWarmerAction.class, + RestDeleteWarmerAction.class, + RestGetWarmerAction.class, + + RestPutMappingAction.class, + RestGetMappingAction.class, + RestGetFieldMappingAction.class, + + RestRefreshAction.class, + RestFlushAction.class, + RestSyncedFlushAction.class, + RestForceMergeAction.class, + RestUpgradeAction.class, + RestClearIndicesCacheAction.class, + + RestIndexAction.class, + RestGetAction.class, + RestGetSourceAction.class, + RestHeadAction.class, + RestMultiGetAction.class, + RestDeleteAction.class, + org.elasticsearch.rest.action.count.RestCountAction.class, + RestSuggestAction.class, + RestTermVectorsAction.class, + RestMultiTermVectorsAction.class, + RestBulkAction.class, + RestUpdateAction.class, + RestPercolateAction.class, + RestMultiPercolateAction.class, + + RestSearchAction.class, + RestSearchScrollAction.class, + RestClearScrollAction.class, + RestMultiSearchAction.class, + RestRenderSearchTemplateAction.class, + + RestValidateQueryAction.class, + + RestExplainAction.class, + + RestRecoveryAction.class, + + // Templates API + RestGetSearchTemplateAction.class, + RestPutSearchTemplateAction.class, + RestDeleteSearchTemplateAction.class, + + // Scripts API + RestGetIndexedScriptAction.class, + 
RestPutIndexedScriptAction.class, + RestDeleteIndexedScriptAction.class, + + RestFieldStatsAction.class, + + // no abstract cat action + RestCatAction.class + ); + + private static final List> builtinCatHandlers = Arrays.asList( + RestAllocationAction.class, + RestShardsAction.class, + RestMasterAction.class, + RestNodesAction.class, + RestIndicesAction.class, + RestSegmentsAction.class, + // Fully qualified to prevent interference with rest.action.count.RestCountAction + org.elasticsearch.rest.action.cat.RestCountAction.class, + // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction + org.elasticsearch.rest.action.cat.RestRecoveryAction.class, + RestHealthAction.class, + org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class, + RestAliasAction.class, + RestThreadPoolAction.class, + RestPluginsAction.class, + RestFielddataAction.class, + RestNodeAttrsAction.class, + RestRepositoriesAction.class, + RestSnapshotAction.class + ); + + private final NetworkService networkService; + private final Settings settings; + private final boolean transportClient; + + private final ExtensionPoint.SelectedType transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class); + private final ExtensionPoint.SelectedType transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class); + private final ExtensionPoint.SelectedType httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class); + private final ExtensionPoint.ClassSet restHandlers = new ExtensionPoint.ClassSet<>("rest_handler", RestHandler.class); + // we must separate the cat rest handlers so RestCatAction can collect them... + private final ExtensionPoint.ClassSet catHandlers = new ExtensionPoint.ClassSet<>("cat_handler", AbstractCatAction.class); + + /** + * Creates a network module that custom networking classes can be plugged into. 
+ * + * @param networkService A constructed network service object to bind. + * @param settings The settings for the node + * @param transportClient True if only transport classes should be allowed to be registered, false otherwise. + */ + public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient) { this.networkService = networkService; + this.settings = settings; + this.transportClient = transportClient; + registerTransportService(NETTY_TRANSPORT, TransportService.class); + registerTransport(LOCAL_TRANSPORT, LocalTransport.class); + registerTransport(NETTY_TRANSPORT, NettyTransport.class); + + if (transportClient == false) { + registerHttpTransport(NETTY_TRANSPORT, NettyHttpServerTransport.class); + + for (Class catAction : builtinCatHandlers) { + catHandlers.registerExtension(catAction); + } + for (Class restAction : builtinRestHandlers) { + restHandlers.registerExtension(restAction); + } + } + } + + /** Adds a transport service implementation that can be selected by setting {@link #TRANSPORT_SERVICE_TYPE_KEY}. */ + public void registerTransportService(String name, Class clazz) { + transportServiceTypes.registerExtension(name, clazz); + } + + /** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */ + public void registerTransport(String name, Class clazz) { + transportTypes.registerExtension(name, clazz); + } + + /** Adds an http transport implementation that can be selected by setting {@link #HTTP_TYPE_KEY}. */ + // TODO: we need another name than "http transport"....so confusing with transportClient... + public void registerHttpTransport(String name, Class clazz) { + if (transportClient) { + throw new IllegalArgumentException("Cannot register http transport " + clazz.getName() + " for transport client"); + } + httpTransportTypes.registerExtension(name, clazz); + } + + /** Adds an additional rest action. 
*/ + // TODO: change this further to eliminate the middle man, ie RestController, and just register method and path here + public void registerRestHandler(Class clazz) { + if (transportClient) { + throw new IllegalArgumentException("Cannot register rest handler " + clazz.getName() + " for transport client"); + } + if (AbstractCatAction.class.isAssignableFrom(clazz)) { + catHandlers.registerExtension(clazz.asSubclass(AbstractCatAction.class)); + } else { + restHandlers.registerExtension(clazz); + } } @Override protected void configure() { bind(NetworkService.class).toInstance(networkService); + bind(NamedWriteableRegistry.class).asEagerSingleton(); + + transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, NETTY_TRANSPORT); + String defaultTransport = DiscoveryNode.localNode(settings) ? LOCAL_TRANSPORT : NETTY_TRANSPORT; + transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, defaultTransport); + + if (transportClient) { + bind(Headers.class).asEagerSingleton(); + bind(TransportProxyClient.class).asEagerSingleton(); + bind(TransportClientNodesService.class).asEagerSingleton(); + } else { + if (settings.getAsBoolean(HTTP_ENABLED, true)) { + bind(HttpServer.class).asEagerSingleton(); + httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_KEY, NETTY_TRANSPORT); + } + bind(RestController.class).asEagerSingleton(); + catHandlers.bind(binder()); + restHandlers.bind(binder()); + } } } diff --git a/core/src/main/java/org/elasticsearch/http/HttpServerModule.java b/core/src/main/java/org/elasticsearch/http/HttpServerModule.java deleted file mode 100644 index 49d67369643..00000000000 --- a/core/src/main/java/org/elasticsearch/http/HttpServerModule.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.http; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.http.netty.NettyHttpServerTransport; - -import java.util.Objects; - -/** - * - */ -public class HttpServerModule extends AbstractModule { - - private final Settings settings; - private final ESLogger logger; - - private Class httpServerTransportClass; - - public HttpServerModule(Settings settings) { - this.settings = settings; - this.logger = Loggers.getLogger(getClass(), settings); - this.httpServerTransportClass = NettyHttpServerTransport.class; - } - - @SuppressWarnings({"unchecked"}) - @Override - protected void configure() { - bind(HttpServerTransport.class).to(httpServerTransportClass).asEagerSingleton(); - bind(HttpServer.class).asEagerSingleton(); - } - - public void setHttpServerTransport(Class httpServerTransport, String source) { - Objects.requireNonNull(httpServerTransport, "Configured http server transport may not be null"); - Objects.requireNonNull(source, "Plugin, that changes transport may not be null"); - logger.info("Using [{}] as http transport, overridden by [{}]", httpServerTransportClass.getName(), source); - this.httpServerTransportClass = httpServerTransport; - } -} diff --git 
a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index d3f6367cac0..c05e1969c23 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -23,7 +23,6 @@ import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionModule; -import org.elasticsearch.bootstrap.Elasticsearch; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.client.Client; import org.elasticsearch.client.node.NodeClientModule; @@ -33,7 +32,6 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.common.StopWatch; -import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; @@ -62,7 +60,6 @@ import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.GatewayModule; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.http.HttpServer; -import org.elasticsearch.http.HttpServerModule; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; @@ -86,7 +83,6 @@ import org.elasticsearch.plugins.PluginsModule; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesModule; import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestModule; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; @@ -95,7 +91,6 @@ import org.elasticsearch.snapshots.SnapshotShardsService; import 
org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportService; import org.elasticsearch.tribe.TribeModule; import org.elasticsearch.tribe.TribeService; @@ -108,7 +103,6 @@ import java.net.Inet6Address; import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.charset.Charset; -import java.nio.file.CopyOption; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; @@ -185,20 +179,15 @@ public class Node implements Releasable { } modules.add(new PluginsModule(pluginsService)); modules.add(new SettingsModule(this.settings, settingsFilter)); - modules.add(new NodeModule(this, nodeSettingsService, monitorService)); - modules.add(new NetworkModule(networkService)); - modules.add(new ScriptModule(this.settings)); modules.add(new EnvironmentModule(environment)); + modules.add(new NodeModule(this, nodeSettingsService, monitorService)); + modules.add(new NetworkModule(networkService, settings, false)); + modules.add(new ScriptModule(this.settings)); modules.add(new NodeEnvironmentModule(nodeEnvironment)); modules.add(new ClusterNameModule(this.settings)); modules.add(new ThreadPoolModule(threadPool)); modules.add(new DiscoveryModule(this.settings)); modules.add(new ClusterModule(this.settings)); - modules.add(new RestModule(this.settings)); - modules.add(new TransportModule(settings)); - if (settings.getAsBoolean(HTTP_ENABLED, true)) { - modules.add(new HttpServerModule(settings)); - } modules.add(new IndicesModule()); modules.add(new SearchModule()); modules.add(new ActionModule(false)); diff --git a/core/src/main/java/org/elasticsearch/rest/RestModule.java b/core/src/main/java/org/elasticsearch/rest/RestModule.java deleted file mode 100644 index e7949172d0a..00000000000 --- 
a/core/src/main/java/org/elasticsearch/rest/RestModule.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.rest; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.action.RestActionModule; - -import java.util.ArrayList; -import java.util.List; - -/** - * - */ -public class RestModule extends AbstractModule { - - private final Settings settings; - private List> restPluginsActions = new ArrayList<>(); - - public void addRestAction(Class restAction) { - restPluginsActions.add(restAction); - } - - public RestModule(Settings settings) { - this.settings = settings; - } - - - @Override - protected void configure() { - bind(RestController.class).asEagerSingleton(); - new RestActionModule(restPluginsActions).configure(binder()); - } -} diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java b/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java deleted file mode 100644 index f0e4d10d7c4..00000000000 --- a/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Licensed to Elasticsearch 
under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.rest.action; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.multibindings.Multibinder; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction; -import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction; -import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction; -import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction; -import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction; -import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction; -import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction; -import 
org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction; -import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction; -import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction; -import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction; -import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetIndicesAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction; -import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction; -import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction; -import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction; -import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction; -import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction; -import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction; -import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction; -import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction; -import 
org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction; -import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction; -import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction; -import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction; -import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction; -import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction; -import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction; -import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction; -import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; -import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction; -import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction; -import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction; -import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction; -import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction; -import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction; -import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction; -import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction; -import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction; -import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction; -import 
org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction; -import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction; -import org.elasticsearch.rest.action.bulk.RestBulkAction; -import org.elasticsearch.rest.action.cat.AbstractCatAction; -import org.elasticsearch.rest.action.cat.RestAliasAction; -import org.elasticsearch.rest.action.cat.RestAllocationAction; -import org.elasticsearch.rest.action.cat.RestCatAction; -import org.elasticsearch.rest.action.cat.RestFielddataAction; -import org.elasticsearch.rest.action.cat.RestHealthAction; -import org.elasticsearch.rest.action.cat.RestIndicesAction; -import org.elasticsearch.rest.action.cat.RestMasterAction; -import org.elasticsearch.rest.action.cat.RestNodeAttrsAction; -import org.elasticsearch.rest.action.cat.RestNodesAction; -import org.elasticsearch.rest.action.cat.RestPluginsAction; -import org.elasticsearch.rest.action.cat.RestRepositoriesAction; -import org.elasticsearch.rest.action.cat.RestSegmentsAction; -import org.elasticsearch.rest.action.cat.RestShardsAction; -import org.elasticsearch.rest.action.cat.RestSnapshotAction; -import org.elasticsearch.rest.action.cat.RestThreadPoolAction; -import org.elasticsearch.rest.action.delete.RestDeleteAction; -import org.elasticsearch.rest.action.explain.RestExplainAction; -import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction; -import org.elasticsearch.rest.action.get.RestGetAction; -import org.elasticsearch.rest.action.get.RestGetSourceAction; -import org.elasticsearch.rest.action.get.RestHeadAction; -import org.elasticsearch.rest.action.get.RestMultiGetAction; -import org.elasticsearch.rest.action.index.RestIndexAction; -import org.elasticsearch.rest.action.main.RestMainAction; -import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction; -import org.elasticsearch.rest.action.percolate.RestPercolateAction; -import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction; -import 
org.elasticsearch.rest.action.script.RestGetIndexedScriptAction; -import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction; -import org.elasticsearch.rest.action.search.RestClearScrollAction; -import org.elasticsearch.rest.action.search.RestMultiSearchAction; -import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.rest.action.search.RestSearchScrollAction; -import org.elasticsearch.rest.action.suggest.RestSuggestAction; -import org.elasticsearch.rest.action.template.RestDeleteSearchTemplateAction; -import org.elasticsearch.rest.action.template.RestGetSearchTemplateAction; -import org.elasticsearch.rest.action.template.RestPutSearchTemplateAction; -import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction; -import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction; -import org.elasticsearch.rest.action.update.RestUpdateAction; - -import java.util.ArrayList; -import java.util.List; - -/** - * - */ -public class RestActionModule extends AbstractModule { - private List> restPluginsActions = new ArrayList<>(); - - public RestActionModule(List> restPluginsActions) { - this.restPluginsActions = restPluginsActions; - } - - @Override - protected void configure() { - for (Class restAction : restPluginsActions) { - bind(restAction).asEagerSingleton(); - } - - bind(RestMainAction.class).asEagerSingleton(); - - bind(RestNodesInfoAction.class).asEagerSingleton(); - bind(RestNodesStatsAction.class).asEagerSingleton(); - bind(RestNodesHotThreadsAction.class).asEagerSingleton(); - bind(RestClusterStatsAction.class).asEagerSingleton(); - bind(RestClusterStateAction.class).asEagerSingleton(); - bind(RestClusterHealthAction.class).asEagerSingleton(); - bind(RestClusterUpdateSettingsAction.class).asEagerSingleton(); - bind(RestClusterGetSettingsAction.class).asEagerSingleton(); - bind(RestClusterRerouteAction.class).asEagerSingleton(); - bind(RestClusterSearchShardsAction.class).asEagerSingleton(); - 
bind(RestPendingClusterTasksAction.class).asEagerSingleton(); - bind(RestPutRepositoryAction.class).asEagerSingleton(); - bind(RestGetRepositoriesAction.class).asEagerSingleton(); - bind(RestDeleteRepositoryAction.class).asEagerSingleton(); - bind(RestVerifyRepositoryAction.class).asEagerSingleton(); - bind(RestGetSnapshotsAction.class).asEagerSingleton(); - bind(RestCreateSnapshotAction.class).asEagerSingleton(); - bind(RestRestoreSnapshotAction.class).asEagerSingleton(); - bind(RestDeleteSnapshotAction.class).asEagerSingleton(); - bind(RestSnapshotsStatusAction.class).asEagerSingleton(); - - bind(RestIndicesExistsAction.class).asEagerSingleton(); - bind(RestTypesExistsAction.class).asEagerSingleton(); - bind(RestGetIndicesAction.class).asEagerSingleton(); - bind(RestIndicesStatsAction.class).asEagerSingleton(); - bind(RestIndicesSegmentsAction.class).asEagerSingleton(); - bind(RestIndicesShardStoresAction.class).asEagerSingleton(); - bind(RestGetAliasesAction.class).asEagerSingleton(); - bind(RestAliasesExistAction.class).asEagerSingleton(); - bind(RestIndexDeleteAliasesAction.class).asEagerSingleton(); - bind(RestIndexPutAliasAction.class).asEagerSingleton(); - bind(RestIndicesAliasesAction.class).asEagerSingleton(); - bind(RestGetIndicesAliasesAction.class).asEagerSingleton(); - bind(RestCreateIndexAction.class).asEagerSingleton(); - bind(RestDeleteIndexAction.class).asEagerSingleton(); - bind(RestCloseIndexAction.class).asEagerSingleton(); - bind(RestOpenIndexAction.class).asEagerSingleton(); - - bind(RestUpdateSettingsAction.class).asEagerSingleton(); - bind(RestGetSettingsAction.class).asEagerSingleton(); - - bind(RestAnalyzeAction.class).asEagerSingleton(); - bind(RestGetIndexTemplateAction.class).asEagerSingleton(); - bind(RestPutIndexTemplateAction.class).asEagerSingleton(); - bind(RestDeleteIndexTemplateAction.class).asEagerSingleton(); - bind(RestHeadIndexTemplateAction.class).asEagerSingleton(); - - bind(RestPutWarmerAction.class).asEagerSingleton(); - 
bind(RestDeleteWarmerAction.class).asEagerSingleton(); - bind(RestGetWarmerAction.class).asEagerSingleton(); - - bind(RestPutMappingAction.class).asEagerSingleton(); - bind(RestGetMappingAction.class).asEagerSingleton(); - bind(RestGetFieldMappingAction.class).asEagerSingleton(); - - bind(RestRefreshAction.class).asEagerSingleton(); - bind(RestFlushAction.class).asEagerSingleton(); - bind(RestSyncedFlushAction.class).asEagerSingleton(); - bind(RestForceMergeAction.class).asEagerSingleton(); - bind(RestUpgradeAction.class).asEagerSingleton(); - bind(RestClearIndicesCacheAction.class).asEagerSingleton(); - - bind(RestIndexAction.class).asEagerSingleton(); - bind(RestGetAction.class).asEagerSingleton(); - bind(RestGetSourceAction.class).asEagerSingleton(); - bind(RestHeadAction.class).asEagerSingleton(); - bind(RestMultiGetAction.class).asEagerSingleton(); - bind(RestDeleteAction.class).asEagerSingleton(); - bind(org.elasticsearch.rest.action.count.RestCountAction.class).asEagerSingleton(); - bind(RestSuggestAction.class).asEagerSingleton(); - bind(RestTermVectorsAction.class).asEagerSingleton(); - bind(RestMultiTermVectorsAction.class).asEagerSingleton(); - bind(RestBulkAction.class).asEagerSingleton(); - bind(RestUpdateAction.class).asEagerSingleton(); - bind(RestPercolateAction.class).asEagerSingleton(); - bind(RestMultiPercolateAction.class).asEagerSingleton(); - - bind(RestSearchAction.class).asEagerSingleton(); - bind(RestSearchScrollAction.class).asEagerSingleton(); - bind(RestClearScrollAction.class).asEagerSingleton(); - bind(RestMultiSearchAction.class).asEagerSingleton(); - bind(RestRenderSearchTemplateAction.class).asEagerSingleton(); - - bind(RestValidateQueryAction.class).asEagerSingleton(); - - bind(RestExplainAction.class).asEagerSingleton(); - - bind(RestRecoveryAction.class).asEagerSingleton(); - - // Templates API - bind(RestGetSearchTemplateAction.class).asEagerSingleton(); - bind(RestPutSearchTemplateAction.class).asEagerSingleton(); - 
bind(RestDeleteSearchTemplateAction.class).asEagerSingleton(); - - // Scripts API - bind(RestGetIndexedScriptAction.class).asEagerSingleton(); - bind(RestPutIndexedScriptAction.class).asEagerSingleton(); - bind(RestDeleteIndexedScriptAction.class).asEagerSingleton(); - - - bind(RestFieldStatsAction.class).asEagerSingleton(); - - // cat API - Multibinder catActionMultibinder = Multibinder.newSetBinder(binder(), AbstractCatAction.class); - catActionMultibinder.addBinding().to(RestAllocationAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestShardsAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestMasterAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestNodesAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestIndicesAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestSegmentsAction.class).asEagerSingleton(); - // Fully qualified to prevent interference with rest.action.count.RestCountAction - catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestCountAction.class).asEagerSingleton(); - // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction - catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestRecoveryAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestHealthAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestAliasAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestThreadPoolAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestPluginsAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestFielddataAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestNodeAttrsAction.class).asEagerSingleton(); - 
catActionMultibinder.addBinding().to(RestRepositoriesAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestSnapshotAction.class).asEagerSingleton(); - // no abstract cat action - bind(RestCatAction.class).asEagerSingleton(); - } -} diff --git a/core/src/main/java/org/elasticsearch/transport/TransportModule.java b/core/src/main/java/org/elasticsearch/transport/TransportModule.java deleted file mode 100644 index abf90deee81..00000000000 --- a/core/src/main/java/org/elasticsearch/transport/TransportModule.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.transport; - -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.transport.local.LocalTransport; -import org.elasticsearch.transport.netty.NettyTransport; - -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; - -/** - * - */ -public class TransportModule extends AbstractModule { - - public static final String TRANSPORT_TYPE_KEY = "transport.type"; - public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type"; - - public static final String LOCAL_TRANSPORT = "local"; - public static final String NETTY_TRANSPORT = "netty"; - - private final ESLogger logger; - private final Settings settings; - - private final Map> transportServices = new HashMap<>(); - private final Map> transports = new HashMap<>(); - private Class configuredTransportService; - private Class configuredTransport; - private String configuredTransportServiceSource; - private String configuredTransportSource; - - public TransportModule(Settings settings) { - this.settings = settings; - this.logger = Loggers.getLogger(getClass(), settings); - addTransport(LOCAL_TRANSPORT, LocalTransport.class); - addTransport(NETTY_TRANSPORT, NettyTransport.class); - } - - public void addTransportService(String name, Class clazz) { - Class oldClazz = transportServices.put(name, clazz); - if (oldClazz != null) { - throw new IllegalArgumentException("Cannot register TransportService [" + name + "] to " + clazz.getName() + ", already registered to " + oldClazz.getName()); - } - } - - public void addTransport(String name, Class clazz) { - Class oldClazz = transports.put(name, clazz); - if (oldClazz != null) { - throw new 
IllegalArgumentException("Cannot register Transport [" + name + "] to " + clazz.getName() + ", already registered to " + oldClazz.getName()); - } - } - - @Override - protected void configure() { - if (configuredTransportService != null) { - logger.info("Using [{}] as transport service, overridden by [{}]", configuredTransportService.getName(), configuredTransportServiceSource); - bind(TransportService.class).to(configuredTransportService).asEagerSingleton(); - } else { - String typeName = settings.get(TRANSPORT_SERVICE_TYPE_KEY); - if (typeName == null) { - bind(TransportService.class).asEagerSingleton(); - } else { - if (transportServices.containsKey(typeName) == false) { - throw new IllegalArgumentException("Unknown TransportService type [" + typeName + "], known types are: " + transportServices.keySet()); - } - bind(TransportService.class).to(transportServices.get(typeName)).asEagerSingleton(); - } - } - - bind(NamedWriteableRegistry.class).asEagerSingleton(); - if (configuredTransport != null) { - logger.info("Using [{}] as transport, overridden by [{}]", configuredTransport.getName(), configuredTransportSource); - bind(Transport.class).to(configuredTransport).asEagerSingleton(); - } else { - String defaultType = DiscoveryNode.localNode(settings) ? 
LOCAL_TRANSPORT : NETTY_TRANSPORT; - String typeName = settings.get(TRANSPORT_TYPE_KEY, defaultType); - Class clazz = transports.get(typeName); - if (clazz == null) { - throw new IllegalArgumentException("Unknown Transport [" + typeName + "]"); - } - bind(Transport.class).to(clazz).asEagerSingleton(); - } - } - - public void setTransportService(Class transportService, String source) { - Objects.requireNonNull(transportService, "Configured transport service may not be null"); - Objects.requireNonNull(source, "Plugin, that changes transport service may not be null"); - this.configuredTransportService = transportService; - this.configuredTransportServiceSource = source; - } - - public void setTransport(Class transport, String source) { - Objects.requireNonNull(transport, "Configured transport may not be null"); - Objects.requireNonNull(source, "Plugin, that changes transport may not be null"); - this.configuredTransport = transport; - this.configuredTransportSource = source; - } -} \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java index f4b29768b91..f452bb5c36c 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -40,7 +41,6 @@ import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; @@ -114,12 +114,12 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { public String description() { return "a mock transport service"; } - public void onModule(TransportModule transportModule) { - transportModule.addTransportService("internal", InternalTransportService.class); + public void onModule(NetworkModule transportModule) { + transportModule.registerTransportService("internal", InternalTransportService.class); } @Override public Settings additionalSettings() { - return Settings.builder().put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, "internal").build(); + return Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "internal").build(); } } diff --git a/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java b/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java index 7901694bd4b..7d696b0cd81 100644 --- a/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java @@ -60,6 +60,24 @@ public abstract class ModuleTestCase extends ESTestCase { fail("Did not find any binding to " + to.getName() + ". Found these bindings:\n" + s); } + /** Configures the module and asserts "clazz" is not bound to anything. 
*/ + public void assertNotBound(Module module, Class clazz) { + List elements = Elements.getElements(module); + for (Element element : elements) { + if (element instanceof LinkedKeyBinding) { + LinkedKeyBinding binding = (LinkedKeyBinding) element; + if (clazz.equals(binding.getKey().getTypeLiteral().getType())) { + fail("Found binding for " + clazz.getName() + " to " + binding.getKey().getTypeLiteral().getType().getTypeName()); + } + } else if (element instanceof UntargettedBinding) { + UntargettedBinding binding = (UntargettedBinding) element; + if (clazz.equals(binding.getKey().getTypeLiteral().getType())) { + fail("Found binding for " + clazz.getName()); + } + } + } + } + /** * Attempts to configure the module, and asserts an {@link IllegalArgumentException} is * caught, containing the given messages diff --git a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java new file mode 100644 index 00000000000..798e82a979e --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -0,0 +1,176 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.network; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Table; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.http.HttpInfo; +import org.elasticsearch.http.HttpServerAdapter; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.HttpStats; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.cat.AbstractCatAction; +import org.elasticsearch.rest.action.cat.RestNodesAction; +import org.elasticsearch.rest.action.main.RestMainAction; +import org.elasticsearch.test.transport.AssertingLocalTransport; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; + +public class NetworkModuleTests extends ModuleTestCase { + + static class FakeTransportService extends TransportService { + public FakeTransportService() { + super(null, null); + } + } + + static class FakeTransport extends AssertingLocalTransport { + public FakeTransport() { + super(null, null, null, null); + } + } + + static class FakeHttpTransport extends AbstractLifecycleComponent implements HttpServerTransport { + public FakeHttpTransport() { + super(null); + } + @Override + protected void doStart() {} + @Override + protected void doStop() {} + @Override + protected void doClose() {} + @Override + public BoundTransportAddress boundAddress() { + return null; + } + @Override + public HttpInfo info() { + return null; + } + @Override + public HttpStats stats() { + return null; + } + @Override + public void httpServerAdapter(HttpServerAdapter httpServerAdapter) {} + } + + static class 
FakeRestHandler extends BaseRestHandler { + public FakeRestHandler() { + super(null, null, null); + } + @Override + protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception {} + } + + static class FakeCatRestHandler extends AbstractCatAction { + public FakeCatRestHandler() { + super(null, null, null); + } + @Override + protected void doRequest(RestRequest request, RestChannel channel, Client client) {} + @Override + protected void documentation(StringBuilder sb) {} + @Override + protected Table getTableWithHeader(RestRequest request) { + return null; + } + } + + public void testRegisterTransportService() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "custom").build(); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerTransportService("custom", FakeTransportService.class); + assertBinding(module, TransportService.class, FakeTransportService.class); + + // check it works with transport only as well + module = new NetworkModule(new NetworkService(settings), settings, true); + module.registerTransportService("custom", FakeTransportService.class); + assertBinding(module, TransportService.class, FakeTransportService.class); + } + + public void testRegisterTransport() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom").build(); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerTransport("custom", FakeTransport.class); + assertBinding(module, Transport.class, FakeTransport.class); + + // check it works with transport only as well + module = new NetworkModule(new NetworkService(settings), settings, true); + module.registerTransport("custom", FakeTransport.class); + assertBinding(module, Transport.class, FakeTransport.class); + } + + public void testRegisterHttpTransport() { + Settings settings = 
Settings.builder().put(NetworkModule.HTTP_TYPE_KEY, "custom").build(); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerHttpTransport("custom", FakeHttpTransport.class); + assertBinding(module, HttpServerTransport.class, FakeHttpTransport.class); + + // check registration not allowed for transport only + module = new NetworkModule(new NetworkService(settings), settings, true); + try { + module.registerHttpTransport("custom", FakeHttpTransport.class); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Cannot register http transport")); + assertTrue(e.getMessage().contains("for transport client")); + } + + // not added if http is disabled + settings = Settings.builder().put(NetworkModule.HTTP_ENABLED, false).build(); + module = new NetworkModule(new NetworkService(settings), settings, false); + assertNotBound(module, HttpServerTransport.class); + } + + public void testRegisterRestHandler() { + Settings settings = Settings.EMPTY; + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerRestHandler(FakeRestHandler.class); + // also check a builtin is bound + assertSetMultiBinding(module, RestHandler.class, FakeRestHandler.class, RestMainAction.class); + + // check registration not allowed for transport only + module = new NetworkModule(new NetworkService(settings), settings, true); + try { + module.registerRestHandler(FakeRestHandler.class); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Cannot register rest handler")); + assertTrue(e.getMessage().contains("for transport client")); + } + } + + public void testRegisterCatRestHandler() { + Settings settings = Settings.EMPTY; + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerRestHandler(FakeCatRestHandler.class); + // also check a builtin is bound + assertSetMultiBinding(module, 
AbstractCatAction.class, FakeCatRestHandler.class, RestNodesAction.class); + } +} diff --git a/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java b/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java deleted file mode 100644 index 7831b7ca994..00000000000 --- a/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.plugins; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.transport.AssertingLocalTransport; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportModule; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; - -import java.io.IOException; -import java.util.Collection; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; - -/** - * - */ -@ClusterScope(scope = Scope.SUITE, numDataNodes = 2) -public class PluggableTransportModuleIT extends ESIntegTestCase { - public static final AtomicInteger SENT_REQUEST_COUNTER = new AtomicInteger(0); - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_TYPE_KEY, "local") - .build(); - } - - @Override - protected Collection> nodePlugins() { - return pluginList(CountingSentRequestsPlugin.class); - } - - @Override - protected Collection> transportClientPlugins() { - return pluginList(CountingSentRequestsPlugin.class); - } - - public void testThatPluginFunctionalityIsLoadedWithoutConfiguration() throws Exception { - for (Transport 
transport : internalCluster().getInstances(Transport.class)) { - assertThat(transport, instanceOf(CountingAssertingLocalTransport.class)); - } - - int countBeforeRequest = SENT_REQUEST_COUNTER.get(); - internalCluster().clientNodeClient().admin().cluster().prepareHealth().get(); - int countAfterRequest = SENT_REQUEST_COUNTER.get(); - assertThat("Expected send request counter to be greather than zero", countAfterRequest, is(greaterThan(countBeforeRequest))); - } - - public static class CountingSentRequestsPlugin extends Plugin { - @Override - public String name() { - return "counting-pipelines-plugin"; - } - - @Override - public String description() { - return "counting-pipelines-plugin"; - } - - public void onModule(TransportModule transportModule) { - transportModule.setTransport(CountingAssertingLocalTransport.class, this.name()); - } - } - - public static final class CountingAssertingLocalTransport extends AssertingLocalTransport { - - @Inject - public CountingAssertingLocalTransport(Settings settings, ThreadPool threadPool, Version version, NamedWriteableRegistry namedWriteableRegistry) { - super(settings, threadPool, version, namedWriteableRegistry); - } - - @Override - public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { - SENT_REQUEST_COUNTER.incrementAndGet(); - super.sendRequest(node, requestId, action, request, options); - } - } -} diff --git a/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java index b9282cf05ad..a16f318140f 100644 --- a/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java +++ b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java @@ -19,8 +19,8 @@ package 
org.elasticsearch.plugins.responseheader; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.rest.RestModule; public class TestResponseHeaderPlugin extends Plugin { @@ -34,7 +34,7 @@ public class TestResponseHeaderPlugin extends Plugin { return "test-plugin-custom-header-desc"; } - public void onModule(RestModule restModule) { - restModule.addRestAction(TestResponseHeaderRestAction.class); + public void onModule(NetworkModule module) { + module.registerRestHandler(TestResponseHeaderRestAction.class); } } diff --git a/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java b/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java new file mode 100644 index 00000000000..d587ab05e45 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.transport.AssertingLocalTransport; +import org.elasticsearch.threadpool.ThreadPool; + +/** Unit tests for module registering custom transport and transport service */ +public class TransportModuleTests extends ModuleTestCase { + + + + static class FakeTransport extends AssertingLocalTransport { + @Inject + public FakeTransport(Settings settings, ThreadPool threadPool, Version version, NamedWriteableRegistry namedWriteableRegistry) { + super(settings, threadPool, version, namedWriteableRegistry); + } + } + + static class FakeTransportService extends TransportService { + @Inject + public FakeTransportService(Settings settings, Transport transport, ThreadPool threadPool) { + super(settings, transport, threadPool); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java index c84a9eb9a55..78caef498d1 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java @@ -21,13 +21,14 @@ package org.elasticsearch.transport.netty; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -40,7 +41,6 @@ import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ActionNotFoundTransportException; import org.elasticsearch.transport.RequestHandlerRegistry; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportRequest; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelPipeline; @@ -66,7 +66,7 @@ public class NettyTransportIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return settingsBuilder().put(super.nodeSettings(nodeOrdinal)) .put("node.mode", "network") - .put(TransportModule.TRANSPORT_TYPE_KEY, "exception-throwing").build(); + .put(NetworkModule.TRANSPORT_TYPE_KEY, "exception-throwing").build(); } @Override @@ -99,8 +99,8 @@ public class NettyTransportIT extends ESIntegTestCase { public String description() { return "an exception throwing transport for testing"; } - public void onModule(TransportModule transportModule) { - transportModule.addTransport("exception-throwing", ExceptionThrowingNettyTransport.class); + public void onModule(NetworkModule module) { + module.registerTransport("exception-throwing", ExceptionThrowingNettyTransport.class); } } diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java index 59ef26c42af..ee49012291d 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java +++ 
b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java @@ -19,11 +19,12 @@ package org.elasticsearch.transport.netty; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -31,7 +32,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.junit.annotations.Network; -import org.elasticsearch.transport.TransportModule; import java.net.InetAddress; import java.util.Locale; @@ -60,7 +60,7 @@ public class NettyTransportMultiPortIntegrationIT extends ESIntegTestCase { Settings.Builder builder = settingsBuilder() .put(super.nodeSettings(nodeOrdinal)) .put("network.host", "127.0.0.1") - .put(TransportModule.TRANSPORT_TYPE_KEY, "netty") + .put(NetworkModule.TRANSPORT_TYPE_KEY, "netty") .put("node.mode", "network") .put("transport.profiles.client1.port", randomPortRange) .put("transport.profiles.client1.publish_host", "127.0.0.7") @@ -72,7 +72,7 @@ public class NettyTransportMultiPortIntegrationIT extends ESIntegTestCase { public void testThatTransportClientCanConnect() throws Exception { Settings settings = settingsBuilder() .put("cluster.name", internalCluster().getClusterName()) - .put(TransportModule.TRANSPORT_TYPE_KEY, "netty") + 
.put(NetworkModule.TRANSPORT_TYPE_KEY, "netty") .put("path.home", createTempDir().toString()) .build(); try (TransportClient transportClient = TransportClient.builder().settings(settings).build()) { diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java index 3437701f6c9..ea67ce32717 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java @@ -21,22 +21,19 @@ package org.elasticsearch.transport.netty; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.transport.TransportModule; import java.net.Inet4Address; -import java.net.Inet6Address; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; /** * Checks that Elasticsearch produces a sane publish_address when it binds to @@ -48,7 +45,7 @@ public class NettyTransportPublishAddressIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(TransportModule.TRANSPORT_TYPE_KEY, "netty") + .put(NetworkModule.TRANSPORT_TYPE_KEY, "netty") .put("node.mode", 
"network").build(); } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java index 66a764dd75a..516514599ae 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java @@ -85,6 +85,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -95,16 +96,32 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportService; import org.junit.After; import org.junit.Before; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.function.Supplier; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.*; +import static 
org.hamcrest.Matchers.emptyIterable; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; @ClusterScope(scope = Scope.SUITE, numClientNodes = 1, minNumDataNodes = 2) public class IndicesRequestTests extends ESIntegTestCase { @@ -127,7 +144,7 @@ public class IndicesRequestTests extends ESIntegTestCase { protected Settings nodeSettings(int ordinal) { // must set this independently of the plugin so it overrides MockTransportService return Settings.builder().put(super.nodeSettings(ordinal)) - .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, "intercepting").build(); + .put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "intercepting").build(); } @Override @@ -756,8 +773,8 @@ public class IndicesRequestTests extends ESIntegTestCase { public String description() { return "an intercepting transport service for testing"; } - public void onModule(TransportModule transportModule) { - transportModule.addTransportService("intercepting", InterceptingTransportService.class); + public void onModule(NetworkModule module) { + module.registerTransportService("intercepting", InterceptingTransportService.class); } } diff --git a/plugins/delete-by-query/src/main/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryPlugin.java b/plugins/delete-by-query/src/main/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryPlugin.java index b189745408f..8395223f669 100644 --- a/plugins/delete-by-query/src/main/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryPlugin.java +++ b/plugins/delete-by-query/src/main/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryPlugin.java @@ -22,14 +22,10 @@ package org.elasticsearch.plugin.deletebyquery; import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.deletebyquery.DeleteByQueryAction; import org.elasticsearch.action.deletebyquery.TransportDeleteByQueryAction; -import 
org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.rest.RestModule; import org.elasticsearch.rest.action.deletebyquery.RestDeleteByQueryAction; -import java.util.Collection; -import java.util.Collections; - public class DeleteByQueryPlugin extends Plugin { public static final String NAME = "delete-by-query"; @@ -48,8 +44,8 @@ public class DeleteByQueryPlugin extends Plugin { actionModule.registerAction(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class); } - public void onModule(RestModule restModule) { - restModule.addRestAction(RestDeleteByQueryAction.class); + public void onModule(NetworkModule module) { + module.registerRestHandler(RestDeleteByQueryAction.class); } } diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java index 3e5c903a1ba..ade424599fe 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java @@ -26,14 +26,11 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.listeners.LoggingListener; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportModule; import java.io.IOException; import java.lang.annotation.ElementType; @@ -43,10 +40,7 @@ import 
java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import java.nio.file.Files; import java.nio.file.Path; -import java.util.Collection; -import java.util.Collections; import java.util.Map; -import java.util.Random; import static org.hamcrest.Matchers.is; @@ -238,7 +232,7 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase { protected Settings commonNodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder().put(requiredSettings()); - builder.put(TransportModule.TRANSPORT_TYPE_KEY, "netty"); // run same transport / disco as external + builder.put(NetworkModule.TRANSPORT_TYPE_KEY, "netty"); // run same transport / disco as external builder.put("node.mode", "network"); return builder.build(); } diff --git a/test-framework/src/main/java/org/elasticsearch/test/ExternalNode.java b/test-framework/src/main/java/org/elasticsearch/test/ExternalNode.java index 6ab39a5b139..05f194fc26a 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ExternalNode.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ExternalNode.java @@ -28,11 +28,11 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.transport.TransportModule; import java.io.Closeable; import java.io.IOException; @@ -111,9 +111,9 @@ final class ExternalNode implements Closeable { case "path.home": case "node.mode": case "node.local": - case TransportModule.TRANSPORT_TYPE_KEY: + case NetworkModule.TRANSPORT_TYPE_KEY: case DiscoveryModule.DISCOVERY_TYPE_KEY: - case 
TransportModule.TRANSPORT_SERVICE_TYPE_KEY: + case NetworkModule.TRANSPORT_SERVICE_TYPE_KEY: case InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING: continue; default: diff --git a/test-framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java b/test-framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java index 64cc401cb5f..8b395003576 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java +++ b/test-framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java @@ -23,6 +23,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -30,7 +31,6 @@ import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; @@ -51,12 +51,12 @@ public class AssertingLocalTransport extends LocalTransport { public String description() { return "an asserting transport for testing"; } - public void onModule(TransportModule transportModule) { - transportModule.addTransport("mock", AssertingLocalTransport.class); + public void onModule(NetworkModule module) { + module.registerTransport("mock", AssertingLocalTransport.class); } @Override public Settings additionalSettings() { - return Settings.builder().put(TransportModule.TRANSPORT_TYPE_KEY, 
"mock").build(); + return Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "mock").build(); } } diff --git a/test-framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test-framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index e1efd6c3745..d636341e42f 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test-framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; @@ -38,7 +39,6 @@ import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; @@ -75,12 +75,12 @@ public class MockTransportService extends TransportService { public String description() { return "a mock transport service for testing"; } - public void onModule(TransportModule transportModule) { - transportModule.addTransportService("mock", MockTransportService.class); + public void onModule(NetworkModule module) { + module.registerTransportService("mock", MockTransportService.class); } @Override public Settings additionalSettings() { - return Settings.builder().put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, 
"mock").build(); + return Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "mock").build(); } } From ec908ddfd652cc66639653d98021b6cce9894f38 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 10 Dec 2015 12:20:19 +0100 Subject: [PATCH 059/167] Use transport service to handle RetryOnReplicaException and execute replica action on the current node. Transport service will delegate to threadpool internally. --- .../TransportReplicationAction.java | 8 +- .../TransportChannelResponseHandler.java | 76 +++++++++++++++++++ 2 files changed, 82 insertions(+), 2 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 26c439c0a3d..d17cc02c5b0 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -300,11 +300,15 @@ public abstract class TransportReplicationAction handler = TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage); + transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler); } @Override diff --git a/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java new file mode 100644 index 00000000000..8c042cd1937 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; + +/** + * Base class for delegating transport response to a transport channel + */ +public abstract class TransportChannelResponseHandler implements TransportResponseHandler { + + /** + * Convenience method for delegating an empty response to the provided changed + */ + public static TransportChannelResponseHandler emptyResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) { + return new TransportChannelResponseHandler(logger, channel, extraInfoOnError) { + @Override + public TransportResponse.Empty newInstance() { + return TransportResponse.Empty.INSTANCE; + } + }; + } + + private final ESLogger logger; + private final TransportChannel channel; + private final String extraInfoOnError; + + protected TransportChannelResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) { + this.logger = logger; + this.channel = channel; + this.extraInfoOnError = extraInfoOnError; + } + + @Override + public void handleResponse(T response) { + try { + channel.sendResponse(response); + } catch (IOException e) { + handleException(new TransportException(e)); + } + } + + @Override + public void handleException(TransportException exp) { + try { + 
channel.sendResponse(exp); + } catch (IOException e) { + logger.debug("failed to send failure {}", e, extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")"); + } + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } +} From 50eeafa75c122096377f37bd9553e7ac9e4d45b5 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 8 Dec 2015 15:14:50 +0100 Subject: [PATCH 060/167] Make mappings immutable. Today mappings are mutable because of two APIs: - Mapper.merge, which expects changes to be performed in-place - IncludeInAll, which allows to change whether values should be put in the `_all` field in place. This commit changes both APIs to return a modified copy instead of modifying in place so that mappings can be immutable. For now, only the type-level object is immutable, but in the future we can imagine making them immutable at the index-level so that mapping updates could be completely atomic at the index level. Close #9365 --- .../metadata/MetaDataMappingService.java | 8 +- .../index/mapper/DocumentMapper.java | 16 ++- .../index/mapper/DocumentParser.java | 4 +- .../index/mapper/FieldMapper.java | 112 ++++++++------- .../elasticsearch/index/mapper/Mapper.java | 4 +- .../index/mapper/MapperService.java | 22 +-- .../index/mapper/MapperUtils.java | 46 ------ .../elasticsearch/index/mapper/Mapping.java | 31 ++-- .../index/mapper/MergeResult.java | 81 ----------- .../index/mapper/MetadataFieldMapper.java | 4 + .../index/mapper/ParseContext.java | 2 +- .../index/mapper/ParsedDocument.java | 2 +- .../index/mapper/core/ByteFieldMapper.java | 3 +- .../mapper/core/CompletionFieldMapper.java | 8 +- .../index/mapper/core/DateFieldMapper.java | 3 +- .../index/mapper/core/DoubleFieldMapper.java | 3 +- .../index/mapper/core/FloatFieldMapper.java | 3 +- .../index/mapper/core/IntegerFieldMapper.java | 3 +- .../index/mapper/core/LongFieldMapper.java | 3 +- .../index/mapper/core/NumberFieldMapper.java | 52 ++++--- 
.../index/mapper/core/ShortFieldMapper.java | 3 +- .../index/mapper/core/StringFieldMapper.java | 48 ++++--- .../mapper/core/TokenCountFieldMapper.java | 15 +- .../mapper/geo/BaseGeoPointFieldMapper.java | 15 +- .../mapper/geo/GeoPointFieldMapperLegacy.java | 16 +-- .../index/mapper/geo/GeoShapeFieldMapper.java | 14 +- .../index/mapper/internal/AllFieldMapper.java | 26 +++- .../index/mapper/internal/IdFieldMapper.java | 3 +- .../mapper/internal/IndexFieldMapper.java | 9 +- .../mapper/internal/ParentFieldMapper.java | 15 +- .../mapper/internal/RoutingFieldMapper.java | 3 +- .../mapper/internal/SourceFieldMapper.java | 26 ++-- .../index/mapper/internal/TTLFieldMapper.java | 15 +- .../mapper/internal/TimestampFieldMapper.java | 49 +++---- .../mapper/internal/TypeFieldMapper.java | 3 +- .../index/mapper/internal/UidFieldMapper.java | 3 +- .../mapper/internal/VersionFieldMapper.java | 3 +- .../index/mapper/ip/IpFieldMapper.java | 3 +- .../index/mapper/object/ObjectMapper.java | 132 ++++++++---------- .../index/mapper/object/RootObjectMapper.java | 42 ++++-- .../shard/TranslogRecoveryPerformer.java | 2 +- .../mapper/copyto/CopyToMapperTests.java | 6 +- .../core/TokenCountFieldMapperTests.java | 7 +- .../mapper/date/SimpleDateMappingTests.java | 3 +- .../mapper/externalvalues/ExternalMapper.java | 3 +- .../ExternalMetadataMapper.java | 5 +- .../mapper/geo/GeoPointFieldMapperTests.java | 1 - .../mapper/geo/GeoShapeFieldMapperTests.java | 4 - .../mapper/merge/TestMergeMapperTests.java | 37 +++-- .../merge/JavaMultiFieldMergeTests.java | 13 +- .../source/DefaultSourceMappingTests.java | 21 +-- .../string/SimpleStringMappingTests.java | 4 +- .../timestamp/TimestampMappingTests.java | 38 +++-- .../index/mapper/ttl/TTLMappingTests.java | 42 +++--- .../mapper/update/UpdateMappingTests.java | 14 +- .../search/child/ChildQuerySearchIT.java | 2 +- .../mapper/attachments/AttachmentMapper.java | 2 +- .../mapper/murmur3/Murmur3FieldMapper.java | 3 +- 
.../index/mapper/size/SizeFieldMapper.java | 9 +- 59 files changed, 446 insertions(+), 623 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 957125703b6..bbaeb5a11d7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -37,7 +37,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.percolator.PercolatorService; @@ -251,11 +250,8 @@ public class MetaDataMappingService extends AbstractComponent { newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null); if (existingMapper != null) { // first, simulate - MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes()); - // if we have conflicts, throw an exception - if (mergeResult.hasConflicts()) { - throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}"); - } + // this will just throw exceptions in case of problems + existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes()); } else { // TODO: can we find a better place for this validation? 
// The reason this validation is here is that the mapper service doesn't learn about diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index c4fec8cf095..24374806717 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -52,6 +52,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -117,7 +118,7 @@ public class DocumentMapper implements ToXContent { private volatile CompressedXContent mappingSource; - private final Mapping mapping; + private volatile Mapping mapping; private final DocumentParser documentParser; @@ -352,16 +353,19 @@ public class DocumentMapper implements ToXContent { mapperService.addMappers(type, objectMappers, fieldMappers); } - public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) { + public void merge(Mapping mapping, boolean simulate, boolean updateAllTypes) { try (ReleasableLock lock = mappingWriteLock.acquire()) { mapperService.checkMappersCompatibility(type, mapping, updateAllTypes); - final MergeResult mergeResult = new MergeResult(simulate, updateAllTypes); - this.mapping.merge(mapping, mergeResult); + // do the merge even if simulate == false so that we get exceptions + Mapping merged = this.mapping.merge(mapping, updateAllTypes); if (simulate == false) { - addMappers(mergeResult.getNewObjectMappers(), mergeResult.getNewFieldMappers(), updateAllTypes); + this.mapping = merged; + Collection objectMappers = new ArrayList<>(); + Collection fieldMappers = new ArrayList<>(Arrays.asList(merged.metadataMappers)); + MapperUtils.collect(merged.root, objectMappers, fieldMappers); + addMappers(objectMappers, fieldMappers, 
updateAllTypes); refreshSource(); } - return mergeResult; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index b0ad972d575..656ee2c600d 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -267,7 +267,7 @@ class DocumentParser implements Closeable { if (update == null) { update = newUpdate; } else { - MapperUtils.merge(update, newUpdate); + update = update.merge(newUpdate, false); } } } @@ -759,7 +759,7 @@ class DocumentParser implements Closeable { private static M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException { final Mapper update = parseObjectOrField(context, mapper); if (update != null) { - MapperUtils.merge(mapper, update); + mapper = (M) mapper.merge(update, false); } return mapper; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index ced3f08b229..9997f8608d2 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -47,7 +47,7 @@ import java.util.List; import java.util.Locale; import java.util.stream.StreamSupport; -public abstract class FieldMapper extends Mapper { +public abstract class FieldMapper extends Mapper implements Cloneable { public abstract static class Builder extends Mapper.Builder { @@ -84,8 +84,13 @@ public abstract class FieldMapper extends Mapper { * if the fieldType has a non-null option we are all good it might have been set through a different * call. 
*/ - final IndexOptions options = getDefaultIndexOption(); - assert options != IndexOptions.NONE : "default IndexOptions is NONE can't enable indexing"; + IndexOptions options = getDefaultIndexOption(); + if (options == IndexOptions.NONE) { + // can happen when an existing type on the same index has disabled indexing + // since we inherit the default field type from the first mapper that is + // created on an index + throw new IllegalArgumentException("mapper [" + name + "] has different [index] values from other types of the same index"); + } fieldType.setIndexOptions(options); } } else { @@ -270,7 +275,7 @@ public abstract class FieldMapper extends Mapper { protected MappedFieldTypeReference fieldTypeRef; protected final MappedFieldType defaultFieldType; - protected final MultiFields multiFields; + protected MultiFields multiFields; protected CopyTo copyTo; protected final boolean indexCreatedBefore2x; @@ -359,26 +364,41 @@ public abstract class FieldMapper extends Mapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected FieldMapper clone() { + try { + return (FieldMapper) super.clone(); + } catch (CloneNotSupportedException e) { + throw new AssertionError(e); + } + } + + @Override + public FieldMapper merge(Mapper mergeWith, boolean updateAllTypes) { + FieldMapper merged = clone(); + merged.doMerge(mergeWith, updateAllTypes); + return merged; + } + + /** + * Merge changes coming from {@code mergeWith} in place. 
+ * @param updateAllTypes TODO + */ + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { if (!this.getClass().equals(mergeWith.getClass())) { String mergedType = mergeWith.getClass().getSimpleName(); if (mergeWith instanceof FieldMapper) { mergedType = ((FieldMapper) mergeWith).contentType(); } - mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); - // different types, return - return; + throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); } FieldMapper fieldMergeWith = (FieldMapper) mergeWith; - multiFields.merge(mergeWith, mergeResult); + multiFields = multiFields.merge(fieldMergeWith.multiFields); - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - // apply changeable values - MappedFieldType fieldType = fieldMergeWith.fieldType().clone(); - fieldType.freeze(); - fieldTypeRef.set(fieldType); - this.copyTo = fieldMergeWith.copyTo; - } + // apply changeable values + MappedFieldType fieldType = fieldMergeWith.fieldType().clone(); + fieldType.freeze(); + fieldTypeRef.set(fieldType); + this.copyTo = fieldMergeWith.copyTo; } @Override @@ -565,18 +585,20 @@ public abstract class FieldMapper extends Mapper { } private final ContentPath.Type pathType; - private volatile ImmutableOpenMap mappers; + private final ImmutableOpenMap mappers; - public MultiFields(ContentPath.Type pathType, ImmutableOpenMap mappers) { + private MultiFields(ContentPath.Type pathType, ImmutableOpenMap mappers) { this.pathType = pathType; - this.mappers = mappers; + ImmutableOpenMap.Builder builder = new ImmutableOpenMap.Builder<>(); // we disable the all in multi-field mappers - for (ObjectCursor cursor : mappers.values()) { + for (ObjectObjectCursor cursor : mappers) { FieldMapper mapper = cursor.value; if (mapper 
instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); + mapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); } + builder.put(cursor.key, mapper); } + this.mappers = builder.build(); } public void parse(FieldMapper mainField, ParseContext context) throws IOException { @@ -598,47 +620,29 @@ public abstract class FieldMapper extends Mapper { context.path().pathType(origPathType); } - // No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge - public void merge(Mapper mergeWith, MergeResult mergeResult) { - FieldMapper mergeWithMultiField = (FieldMapper) mergeWith; + public MultiFields merge(MultiFields mergeWith) { + if (pathType != mergeWith.pathType) { + throw new IllegalArgumentException("Can't change path type from [" + pathType + "] to [" + mergeWith.pathType + "]"); + } + ImmutableOpenMap.Builder newMappersBuilder = ImmutableOpenMap.builder(mappers); - List newFieldMappers = null; - ImmutableOpenMap.Builder newMappersBuilder = null; - - for (ObjectCursor cursor : mergeWithMultiField.multiFields.mappers.values()) { + for (ObjectCursor cursor : mergeWith.mappers.values()) { FieldMapper mergeWithMapper = cursor.value; - Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName()); + FieldMapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName()); if (mergeIntoMapper == null) { - // no mapping, simply add it if not simulating - if (!mergeResult.simulate()) { - // we disable the all in multi-field mappers - if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll(); - } - if (newMappersBuilder == null) { - newMappersBuilder = ImmutableOpenMap.builder(mappers); - } - newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper); - if (mergeWithMapper instanceof FieldMapper) { - if (newFieldMappers == null) { - newFieldMappers = new ArrayList<>(2); - } 
- newFieldMappers.add(mergeWithMapper); - } + // we disable the all in multi-field mappers + if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) { + mergeWithMapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll(); } + newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper); } else { - mergeIntoMapper.merge(mergeWithMapper, mergeResult); + FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false); + newMappersBuilder.put(merged.simpleName(), merged); // override previous definition } } - // first add all field mappers - if (newFieldMappers != null) { - mergeResult.addFieldMappers(newFieldMappers); - } - // now publish mappers - if (newMappersBuilder != null) { - mappers = newMappersBuilder.build(); - } + ImmutableOpenMap mappers = newMappersBuilder.build(); + return new MultiFields(pathType, mappers); } public Iterator iterator() { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 33a4dabd3be..4c3aa3c56bb 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -174,5 +174,7 @@ public abstract class Mapper implements ToXContent, Iterable { /** Returns the canonical name which uniquely identifies the mapper against other mappers in a type. */ public abstract String name(); - public abstract void merge(Mapper mergeWith, MergeResult mergeResult); + /** Return the merge of {@code mergeWith} into this. + * Both {@code this} and {@code mergeWith} will be left unmodified. 
*/ + public abstract Mapper merge(Mapper mergeWith, boolean updateAllTypes); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 938f610d6db..1d2961c482a 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -251,14 +251,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { DocumentMapper oldMapper = mappers.get(mapper.type()); if (oldMapper != null) { - // simulate first - MergeResult result = oldMapper.merge(mapper.mapping(), true, updateAllTypes); - if (result.hasConflicts()) { - throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(result.buildConflicts()) + "}"); - } - // then apply for real - result = oldMapper.merge(mapper.mapping(), false, updateAllTypes); - assert result.hasConflicts() == false; // we already simulated + oldMapper.merge(mapper.mapping(), false, updateAllTypes); return oldMapper; } else { Tuple, Collection> newMappers = checkMappersCompatibility( @@ -305,12 +298,9 @@ public class MapperService extends AbstractIndexComponent implements Closeable { for (ObjectMapper newObjectMapper : objectMappers) { ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath()); if (existingObjectMapper != null) { - MergeResult result = new MergeResult(true, updateAllTypes); - existingObjectMapper.merge(newObjectMapper, result); - if (result.hasConflicts()) { - throw new IllegalArgumentException("Mapper for [" + newObjectMapper.fullPath() + "] conflicts with existing mapping in other types" + - Arrays.toString(result.buildConflicts())); - } + // simulate a merge and ignore the result, we are just interested + // in exceptions here + existingObjectMapper.merge(newObjectMapper, updateAllTypes); } } fieldTypes.checkCompatibility(type, fieldMappers, 
updateAllTypes); @@ -320,9 +310,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { String type, Mapping mapping, boolean updateAllTypes) { List objectMappers = new ArrayList<>(); List fieldMappers = new ArrayList<>(); - for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { - fieldMappers.add(metadataMapper); - } + Collections.addAll(fieldMappers, mapping.metadataMappers); MapperUtils.collect(mapping.root, objectMappers, fieldMappers); checkMappersCompatibility(type, objectMappers, fieldMappers, updateAllTypes); return new Tuple<>(objectMappers, fieldMappers); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java index d46c32a932b..04508827f77 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java @@ -27,52 +27,6 @@ import java.util.Collection; public enum MapperUtils { ; - private static MergeResult newStrictMergeResult() { - return new MergeResult(false, false) { - - @Override - public void addFieldMappers(Collection fieldMappers) { - // no-op - } - - @Override - public void addObjectMappers(Collection objectMappers) { - // no-op - } - - @Override - public Collection getNewFieldMappers() { - throw new UnsupportedOperationException("Strict merge result does not support new field mappers"); - } - - @Override - public Collection getNewObjectMappers() { - throw new UnsupportedOperationException("Strict merge result does not support new object mappers"); - } - - @Override - public void addConflict(String mergeFailure) { - throw new MapperParsingException("Merging dynamic updates triggered a conflict: " + mergeFailure); - } - }; - } - - /** - * Merge {@code mergeWith} into {@code mergeTo}. Note: this method only - * merges mappings, not lookup structures. Conflicts are returned as exceptions. 
- */ - public static void merge(Mapper mergeInto, Mapper mergeWith) { - mergeInto.merge(mergeWith, newStrictMergeResult()); - } - - /** - * Merge {@code mergeWith} into {@code mergeTo}. Note: this method only - * merges mappings, not lookup structures. Conflicts are returned as exceptions. - */ - public static void merge(Mapping mergeInto, Mapping mergeWith) { - mergeInto.merge(mergeWith, newStrictMergeResult()); - } - /** Split mapper and its descendants into object and field mappers. */ public static void collect(Mapper mapper, Collection objectMappers, Collection fieldMappers) { if (mapper instanceof RootObjectMapper) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java index bac42162552..a16024211bf 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -47,19 +47,19 @@ public final class Mapping implements ToXContent { final RootObjectMapper root; final MetadataFieldMapper[] metadataMappers; final Map, MetadataFieldMapper> metadataMappersMap; - volatile Map meta; + final Map meta; public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map meta) { this.indexCreated = indexCreated; - this.root = rootObjectMapper; this.metadataMappers = metadataMappers; Map, MetadataFieldMapper> metadataMappersMap = new HashMap<>(); for (MetadataFieldMapper metadataMapper : metadataMappers) { if (indexCreated.before(Version.V_2_0_0_beta1) && LEGACY_INCLUDE_IN_OBJECT.contains(metadataMapper.name())) { - root.putMapper(metadataMapper); + rootObjectMapper = rootObjectMapper.copyAndPutMapper(metadataMapper); } metadataMappersMap.put(metadataMapper.getClass(), metadataMapper); } + this.root = rootObjectMapper; // keep root mappers sorted for consistent serialization Arrays.sort(metadataMappers, new Comparator() { @Override @@ -90,21 +90,20 @@ public 
final class Mapping implements ToXContent { } /** @see DocumentMapper#merge(Mapping, boolean, boolean) */ - public void merge(Mapping mergeWith, MergeResult mergeResult) { - assert metadataMappers.length == mergeWith.metadataMappers.length; - - root.merge(mergeWith.root, mergeResult); - for (MetadataFieldMapper metadataMapper : metadataMappers) { - MetadataFieldMapper mergeWithMetadataMapper = mergeWith.metadataMapper(metadataMapper.getClass()); - if (mergeWithMetadataMapper != null) { - metadataMapper.merge(mergeWithMetadataMapper, mergeResult); + public Mapping merge(Mapping mergeWith, boolean updateAllTypes) { + RootObjectMapper mergedRoot = root.merge(mergeWith.root, updateAllTypes); + Map, MetadataFieldMapper> mergedMetaDataMappers = new HashMap<>(metadataMappersMap); + for (MetadataFieldMapper metaMergeWith : mergeWith.metadataMappers) { + MetadataFieldMapper mergeInto = mergedMetaDataMappers.get(metaMergeWith.getClass()); + MetadataFieldMapper merged; + if (mergeInto == null) { + merged = metaMergeWith; + } else { + merged = mergeInto.merge(metaMergeWith, updateAllTypes); } + mergedMetaDataMappers.put(merged.getClass(), merged); } - - if (mergeResult.simulate() == false) { - // let the merge with attributes to override the attributes - meta = mergeWith.meta; - } + return new Mapping(indexCreated, mergedRoot, mergedMetaDataMappers.values().toArray(new MetadataFieldMapper[0]), mergeWith.meta); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java b/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java deleted file mode 100644 index f5698a0ed18..00000000000 --- a/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.index.mapper.object.ObjectMapper; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -/** A container for tracking results of a mapping merge. */ -public class MergeResult { - - private final boolean simulate; - private final boolean updateAllTypes; - - private final List conflicts = new ArrayList<>(); - private final List newFieldMappers = new ArrayList<>(); - private final List newObjectMappers = new ArrayList<>(); - - public MergeResult(boolean simulate, boolean updateAllTypes) { - this.simulate = simulate; - this.updateAllTypes = updateAllTypes; - } - - public void addFieldMappers(Collection fieldMappers) { - assert simulate() == false; - newFieldMappers.addAll(fieldMappers); - } - - public void addObjectMappers(Collection objectMappers) { - assert simulate() == false; - newObjectMappers.addAll(objectMappers); - } - - public Collection getNewFieldMappers() { - return newFieldMappers; - } - - public Collection getNewObjectMappers() { - return newObjectMappers; - } - - public boolean simulate() { - return simulate; - } - - public boolean updateAllTypes() { - return updateAllTypes; - } - - public void addConflict(String mergeFailure) { - conflicts.add(mergeFailure); - } - - public boolean hasConflicts() { - return 
conflicts.isEmpty() == false; - } - - public String[] buildConflicts() { - return conflicts.toArray(Strings.EMPTY_ARRAY); - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index fc6d1fa9e1a..2f3b40126ed 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -70,4 +70,8 @@ public abstract class MetadataFieldMapper extends FieldMapper { */ public abstract void postParse(ParseContext context) throws IOException; + @Override + public MetadataFieldMapper merge(Mapper mergeWith, boolean updateAllTypes) { + return (MetadataFieldMapper) super.merge(mergeWith, updateAllTypes); + } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index edf75621c1e..0a88e29c8d6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -595,7 +595,7 @@ public abstract class ParseContext { if (dynamicMappingsUpdate == null) { dynamicMappingsUpdate = mapper; } else { - MapperUtils.merge(dynamicMappingsUpdate, mapper); + dynamicMappingsUpdate = dynamicMappingsUpdate.merge(mapper, false); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index ed8314c6f7d..aa35e699b2d 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -128,7 +128,7 @@ public class ParsedDocument { if (dynamicMappingsUpdate == null) { dynamicMappingsUpdate = update; } else { - MapperUtils.merge(dynamicMappingsUpdate, update); + dynamicMappingsUpdate = 
dynamicMappingsUpdate.merge(update, false); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index 61b22a1ee26..44b4cbcd35e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -77,8 +77,7 @@ public class ByteFieldMapper extends NumberFieldMapper { setupFieldType(context); ByteFieldMapper fieldMapper = new ByteFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (ByteFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index 5b4df635a34..9d465b4cffc 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -605,11 +605,9 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith; - if (!mergeResult.simulate()) { - this.maxInputLength = fieldMergeWith.maxInputLength; - } + this.maxInputLength = fieldMergeWith.maxInputLength; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index 
27b96b27a44..7a99e6b50c0 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -123,8 +123,7 @@ public class DateFieldMapper extends NumberFieldMapper { fieldType.setNullValue(nullValue); DateFieldMapper fieldMapper = new DateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (DateFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index 0e512bf4281..861d33e560e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -80,8 +80,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { setupFieldType(context); DoubleFieldMapper fieldMapper = new DoubleFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (DoubleFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index 9a607ffd415..ad88c745dfd 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -81,8 +81,7 @@ public class FloatFieldMapper extends NumberFieldMapper { setupFieldType(context); FloatFieldMapper fieldMapper = new 
FloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (FloatFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index 868cfeb4380..18995498113 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -85,8 +85,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { IntegerFieldMapper fieldMapper = new IntegerFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (IntegerFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index 4130c902586..9d9557c41f4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -84,8 +84,7 @@ public class LongFieldMapper extends NumberFieldMapper { setupFieldType(context); LongFieldMapper fieldMapper = new LongFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (LongFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git 
a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index 87a63de99ec..04dd1a21335 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -183,22 +183,41 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM } @Override - public void includeInAll(Boolean includeInAll) { + protected NumberFieldMapper clone() { + return (NumberFieldMapper) super.clone(); + } + + @Override + public Mapper includeInAll(Boolean includeInAll) { if (includeInAll != null) { - this.includeInAll = includeInAll; + NumberFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; } } @Override - public void includeInAllIfNotSet(Boolean includeInAll) { + public Mapper includeInAllIfNotSet(Boolean includeInAll) { if (includeInAll != null && this.includeInAll == null) { - this.includeInAll = includeInAll; + NumberFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; } } @Override - public void unsetIncludeInAll() { - includeInAll = null; + public Mapper unsetIncludeInAll() { + if (includeInAll != null) { + NumberFieldMapper clone = clone(); + clone.includeInAll = null; + return clone; + } else { + return this; + } } @Override @@ -254,21 +273,16 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); NumberFieldMapper nfmMergeWith = (NumberFieldMapper) mergeWith; - if (mergeResult.simulate() == false && 
mergeResult.hasConflicts() == false) { - this.includeInAll = nfmMergeWith.includeInAll; - if (nfmMergeWith.ignoreMalformed.explicit()) { - this.ignoreMalformed = nfmMergeWith.ignoreMalformed; - } - if (nfmMergeWith.coerce.explicit()) { - this.coerce = nfmMergeWith.coerce; - } + this.includeInAll = nfmMergeWith.includeInAll; + if (nfmMergeWith.ignoreMalformed.explicit()) { + this.ignoreMalformed = nfmMergeWith.ignoreMalformed; + } + if (nfmMergeWith.coerce.explicit()) { + this.coerce = nfmMergeWith.coerce; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index 81ed6cc3bac..e455959c530 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -81,8 +81,7 @@ public class ShortFieldMapper extends NumberFieldMapper { ShortFieldMapper fieldMapper = new ShortFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (ShortFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java index 0a921ad85eb..061d3a2e343 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java @@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import 
org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.internal.AllFieldMapper; @@ -150,8 +149,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc StringFieldMapper fieldMapper = new StringFieldMapper( name, fieldType, defaultFieldType, positionIncrementGap, ignoreAbove, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return fieldMapper.includeInAll(includeInAll); } } @@ -257,22 +255,41 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc } @Override - public void includeInAll(Boolean includeInAll) { + protected StringFieldMapper clone() { + return (StringFieldMapper) super.clone(); + } + + @Override + public StringFieldMapper includeInAll(Boolean includeInAll) { if (includeInAll != null) { - this.includeInAll = includeInAll; + StringFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; } } @Override - public void includeInAllIfNotSet(Boolean includeInAll) { + public StringFieldMapper includeInAllIfNotSet(Boolean includeInAll) { if (includeInAll != null && this.includeInAll == null) { - this.includeInAll = includeInAll; + StringFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; } } @Override - public void unsetIncludeInAll() { - includeInAll = null; + public StringFieldMapper unsetIncludeInAll() { + if (includeInAll != null) { + StringFieldMapper clone = clone(); + clone.includeInAll = null; + return clone; + } else { + return this; + } } @Override @@ -359,15 +376,10 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } - if (!mergeResult.simulate()) { - 
this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll; - this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); + this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll; + this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java index 8348892e44a..a485c3727fc 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java @@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost; @@ -81,8 +80,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper { TokenCountFieldMapper fieldMapper = new TokenCountFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), analyzer, multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (TokenCountFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override @@ -190,14 +188,9 @@ public class TokenCountFieldMapper extends IntegerFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } - if (!mergeResult.simulate()) { - this.analyzer = 
((TokenCountFieldMapper) mergeWith).analyzer; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); + this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java index 0b57d866ddd..2b1d091be3b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper; @@ -388,17 +387,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } - + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); BaseGeoPointFieldMapper gpfmMergeWith = (BaseGeoPointFieldMapper) mergeWith; - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - if (gpfmMergeWith.ignoreMalformed.explicit()) { - this.ignoreMalformed = gpfmMergeWith.ignoreMalformed; - } + if (gpfmMergeWith.ignoreMalformed.explicit()) { + this.ignoreMalformed = gpfmMergeWith.ignoreMalformed; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java 
b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java index 84e6bde07ac..7e5a8738384 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java @@ -39,7 +39,6 @@ import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField; @@ -297,23 +296,18 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); GeoPointFieldMapperLegacy gpfmMergeWith = (GeoPointFieldMapperLegacy) mergeWith; if (gpfmMergeWith.coerce.explicit()) { if (coerce.explicit() && coerce.value() != gpfmMergeWith.coerce.value()) { - mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] has different [coerce]"); + throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] has different [coerce]"); } } - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - if (gpfmMergeWith.coerce.explicit()) { - this.coerce = gpfmMergeWith.coerce; - } + if (gpfmMergeWith.coerce.explicit()) { + this.coerce = gpfmMergeWith.coerce; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java 
b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index 71b6d89610f..7c100a306c2 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import java.io.IOException; @@ -475,17 +474,12 @@ public class GeoShapeFieldMapper extends FieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith; - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - if (gsfm.coerce.explicit()) { - this.coerce = gsfm.coerce; - } + if (gsfm.coerce.explicit()) { + this.coerce = gsfm.coerce; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java index 645c36a4855..4676c63e793 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java @@ -36,7 +36,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import 
org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryShardContext; @@ -58,11 +57,24 @@ public class AllFieldMapper extends MetadataFieldMapper { public interface IncludeInAll { - void includeInAll(Boolean includeInAll); + /** + * If {@code includeInAll} is not null then return a copy of this mapper + * that will include values in the _all field according to {@code includeInAll}. + */ + Mapper includeInAll(Boolean includeInAll); - void includeInAllIfNotSet(Boolean includeInAll); + /** + * If {@code includeInAll} is not null and not set on this mapper yet, then + * return a copy of this mapper that will include values in the _all field + * according to {@code includeInAll}. + */ + Mapper includeInAllIfNotSet(Boolean includeInAll); - void unsetIncludeInAll(); + /** + * If {@code includeInAll} was already set on this mapper then return a copy + * of this mapper that has {@code includeInAll} not set. 
+ */ + Mapper unsetIncludeInAll(); } public static final String NAME = "_all"; @@ -309,11 +321,11 @@ public class AllFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) { - mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled()); + throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled()); } - super.merge(mergeWith, mergeResult); + super.doMerge(mergeWith, updateAllTypes); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java index 16b6c4c56da..a0b7cddae76 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java @@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; @@ -331,7 +330,7 @@ public class IdFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git 
a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java index 962332b5c4b..167807f3b2a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryShardContext; @@ -279,12 +278,10 @@ public class IndexFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith; - if (!mergeResult.simulate()) { - if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) { - this.enabledState = indexFieldMapperMergeWith.enabledState; - } + if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) { + this.enabledState = indexFieldMapperMergeWith.enabledState; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index 760259a1802..6142bf475ec 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; 
import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; @@ -371,11 +370,11 @@ public class ParentFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith; if (Objects.equals(parentType, fieldMergeWith.parentType) == false) { - mergeResult.addConflict("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); + throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); } List conflicts = new ArrayList<>(); @@ -383,13 +382,13 @@ public class ParentFieldMapper extends MetadataFieldMapper { parentJoinFieldType.checkCompatibility(fieldMergeWith.parentJoinFieldType, conflicts, true); // same here if (childJoinFieldType != null) { // TODO: this can be set to false when the old parent/child impl is removed, we can do eager global ordinals loading per type. 
- childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, mergeResult.updateAllTypes() == false); + childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, updateAllTypes == false); } - for (String conflict : conflicts) { - mergeResult.addConflict(conflict); + if (conflicts.isEmpty() == false) { + throw new IllegalArgumentException("Merge conflicts: " + conflicts); } - if (active() && mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { + if (active()) { childJoinFieldType = fieldMergeWith.childJoinFieldType.clone(); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java index 18d0645d2d5..e791ad376c3 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; @@ -249,7 +248,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index f9bcb31b406..4d47c3bf446 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ 
b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -41,11 +41,11 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -310,18 +310,20 @@ public class SourceFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith; - if (mergeResult.simulate()) { - if (this.enabled != sourceMergeWith.enabled) { - mergeResult.addConflict("Cannot update enabled setting for [_source]"); - } - if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) { - mergeResult.addConflict("Cannot update includes setting for [_source]"); - } - if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) { - mergeResult.addConflict("Cannot update excludes setting for [_source]"); - } + List conflicts = new ArrayList<>(); + if (this.enabled != sourceMergeWith.enabled) { + conflicts.add("Cannot update enabled setting for [_source]"); + } + if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) { + conflicts.add("Cannot update includes setting for [_source]"); + } + if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) { + conflicts.add("Cannot update excludes setting for [_source]"); + } + if (conflicts.isEmpty() == false) { + throw new IllegalArgumentException("Can't merge because of conflicts: " + conflicts); } } } diff --git 
a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java index 9a18befe622..7a17e56e7dd 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java @@ -32,7 +32,6 @@ import org.elasticsearch.index.analysis.NumericLongAnalyzer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.SourceToParse; @@ -258,21 +257,19 @@ public class TTLFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith; - if (((TTLFieldMapper) mergeWith).enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with - if (this.enabledState == EnabledAttributeMapper.ENABLED && ((TTLFieldMapper) mergeWith).enabledState == EnabledAttributeMapper.DISABLED) { - mergeResult.addConflict("_ttl cannot be disabled once it was enabled."); + if (ttlMergeWith.enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with + if (this.enabledState == EnabledAttributeMapper.ENABLED && ttlMergeWith.enabledState == EnabledAttributeMapper.DISABLED) { + throw new IllegalArgumentException("_ttl cannot be disabled once it was enabled."); } else { - if (!mergeResult.simulate()) { - this.enabledState = ttlMergeWith.enabledState; - } + this.enabledState = 
ttlMergeWith.enabledState; } } if (ttlMergeWith.defaultTTL != -1) { // we never build the default when the field is disabled so we should also not set it // (it does not make a difference though as everything that is not build in toXContent will also not be set in the cluster) - if (!mergeResult.simulate() && (enabledState == EnabledAttributeMapper.ENABLED)) { + if (enabledState == EnabledAttributeMapper.ENABLED) { this.defaultTTL = ttlMergeWith.defaultTTL; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java index 468243d63cf..0657d67857b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java @@ -33,13 +33,13 @@ import org.elasticsearch.index.analysis.NumericDateAnalyzer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.elasticsearch.index.mapper.core.LongFieldMapper; import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -379,31 +379,32 @@ public class TimestampFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith; - super.merge(mergeWith, mergeResult); - if (!mergeResult.simulate()) { - if (timestampFieldMapperMergeWith.enabledState != enabledState && 
!timestampFieldMapperMergeWith.enabledState.unset()) { - this.enabledState = timestampFieldMapperMergeWith.enabledState; - } - } else { - if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) { - return; - } - if (defaultTimestamp == null) { - mergeResult.addConflict("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); - } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) { - mergeResult.addConflict("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null"); - } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) { - mergeResult.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); - } - if (this.path != null) { - if (path.equals(timestampFieldMapperMergeWith.path()) == false) { - mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path())); - } - } else if (timestampFieldMapperMergeWith.path() != null) { - mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing"); + super.doMerge(mergeWith, updateAllTypes); + if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) { + this.enabledState = timestampFieldMapperMergeWith.enabledState; + } + if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) { + return; + } + List conflicts = new ArrayList<>(); + if (defaultTimestamp == null) { + conflicts.add("Cannot update default in _timestamp value. 
Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); + } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) { + conflicts.add("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null"); + } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) { + conflicts.add("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); + } + if (this.path != null) { + if (path.equals(timestampFieldMapperMergeWith.path()) == false) { + conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path())); } + } else if (timestampFieldMapperMergeWith.path() != null) { + conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing"); + } + if (conflicts.isEmpty() == false) { + throw new IllegalArgumentException("Conflicts: " + conflicts); } } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java index d4acc3c5975..a140593943f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java @@ -40,7 +40,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; @@ -225,7 
+224,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java index ef4c48e62e3..1cf3b9d9ac3 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java @@ -33,7 +33,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -225,7 +224,7 @@ public class UidFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java index 292a622ab73..d9659f40c22 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import 
org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -166,7 +165,7 @@ public class VersionFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // nothing to do } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index e57ceaf8ca8..d8a7c752e6f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -122,8 +122,7 @@ public class IpFieldMapper extends NumberFieldMapper { setupFieldType(context); IpFieldMapper fieldMapper = new IpFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (IpFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index 88f89719050..cbbc8563576 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -160,7 +160,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, context.path().remove(); ObjectMapper objectMapper = createMapper(name, context.path().fullPathAsText(name), enabled, nested, dynamic, pathType, mappers, context.indexSettings()); - 
objectMapper.includeInAllIfNotSet(includeInAll); + objectMapper = objectMapper.includeInAllIfNotSet(includeInAll); return (Y) objectMapper; } @@ -389,41 +389,53 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, } @Override - public void includeInAll(Boolean includeInAll) { + public ObjectMapper includeInAll(Boolean includeInAll) { if (includeInAll == null) { - return; + return this; } - this.includeInAll = includeInAll; + + ObjectMapper clone = clone(); + clone.includeInAll = includeInAll; // when called from outside, apply this on all the inner mappers - for (Mapper mapper : mappers.values()) { + for (Mapper mapper : clone.mappers.values()) { if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll); + clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll)); } } + return clone; } @Override - public void includeInAllIfNotSet(Boolean includeInAll) { - if (this.includeInAll == null) { - this.includeInAll = includeInAll; + public ObjectMapper includeInAllIfNotSet(Boolean includeInAll) { + if (includeInAll == null || this.includeInAll != null) { + return this; } + + ObjectMapper clone = clone(); + clone.includeInAll = includeInAll; // when called from outside, apply this on all the inner mappers - for (Mapper mapper : mappers.values()) { + for (Mapper mapper : clone.mappers.values()) { if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll); + clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll)); } } + return clone; } @Override - public void unsetIncludeInAll() { - includeInAll = null; + public ObjectMapper unsetIncludeInAll() { + if (includeInAll == null) { + return this; + } + ObjectMapper clone = clone(); + clone.includeInAll = null; // when called from outside, apply this on all the inner mappers for (Mapper mapper : mappers.values()) 
{ if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); + clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll()); } } + return clone; } public Nested nested() { @@ -434,14 +446,9 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, return this.nestedTypeFilter; } - /** - * Put a new mapper. - * NOTE: this method must be called under the current {@link DocumentMapper} - * lock if concurrent updates are expected. - */ - public void putMapper(Mapper mapper) { + protected void putMapper(Mapper mapper) { if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll); + mapper = ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll); } mappers = mappers.copyAndPut(mapper.simpleName(), mapper); } @@ -464,64 +471,43 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, } @Override - public void merge(final Mapper mergeWith, final MergeResult mergeResult) { + public ObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) { if (!(mergeWith instanceof ObjectMapper)) { - mergeResult.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); - return; + throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); } ObjectMapper mergeWithObject = (ObjectMapper) mergeWith; - - if (nested().isNested()) { - if (!mergeWithObject.nested().isNested()) { - mergeResult.addConflict("object mapping [" + name() + "] can't be changed from nested to non-nested"); - return; - } - } else { - if (mergeWithObject.nested().isNested()) { - mergeResult.addConflict("object mapping [" + name() + "] can't be changed from non-nested to nested"); - return; - } - } - - if (!mergeResult.simulate()) { - if (mergeWithObject.dynamic != null) { - 
this.dynamic = mergeWithObject.dynamic; - } - } - - doMerge(mergeWithObject, mergeResult); - - List mappersToPut = new ArrayList<>(); - List newObjectMappers = new ArrayList<>(); - List newFieldMappers = new ArrayList<>(); - for (Mapper mapper : mergeWithObject) { - Mapper mergeWithMapper = mapper; - Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName()); - if (mergeIntoMapper == null) { - // no mapping, simply add it if not simulating - if (!mergeResult.simulate()) { - mappersToPut.add(mergeWithMapper); - MapperUtils.collect(mergeWithMapper, newObjectMappers, newFieldMappers); - } - } else if (mergeIntoMapper instanceof MetadataFieldMapper == false) { - // root mappers can only exist here for backcompat, and are merged in Mapping - mergeIntoMapper.merge(mergeWithMapper, mergeResult); - } - } - if (!newFieldMappers.isEmpty()) { - mergeResult.addFieldMappers(newFieldMappers); - } - if (!newObjectMappers.isEmpty()) { - mergeResult.addObjectMappers(newObjectMappers); - } - // add the mappers only after the administration have been done, so it will not be visible to parser (which first try to read with no lock) - for (Mapper mapper : mappersToPut) { - putMapper(mapper); - } + ObjectMapper merged = clone(); + merged.doMerge(mergeWithObject, updateAllTypes); + return merged; } - protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { + protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) { + if (nested().isNested()) { + if (!mergeWith.nested().isNested()) { + throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from nested to non-nested"); + } + } else { + if (mergeWith.nested().isNested()) { + throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from non-nested to nested"); + } + } + if (mergeWith.dynamic != null) { + this.dynamic = mergeWith.dynamic; + } + + for (Mapper mergeWithMapper : mergeWith) { + Mapper mergeIntoMapper = 
mappers.get(mergeWithMapper.simpleName()); + Mapper merged; + if (mergeIntoMapper == null) { + // no mapping, simply add it + merged = mergeWithMapper; + } else { + // root mappers can only exist here for backcompat, and are merged in Mapping + merged = mergeIntoMapper.merge(mergeWithMapper, updateAllTypes); + } + putMapper(merged); + } } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java index a0c989abd7d..c6c64e432b7 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java @@ -205,6 +205,14 @@ public class RootObjectMapper extends ObjectMapper { this.numericDetection = numericDetection; } + /** Return a copy of this mapper that has the given {@code mapper} as a + * sub mapper. */ + public RootObjectMapper copyAndPutMapper(Mapper mapper) { + RootObjectMapper clone = (RootObjectMapper) clone(); + clone.putMapper(mapper); + return clone; + } + @Override public ObjectMapper mappingUpdate(Mapper mapper) { RootObjectMapper update = (RootObjectMapper) super.mappingUpdate(mapper); @@ -253,25 +261,29 @@ public class RootObjectMapper extends ObjectMapper { } @Override - protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { + public RootObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) { + return (RootObjectMapper) super.merge(mergeWith, updateAllTypes); + } + + @Override + protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; - if (!mergeResult.simulate()) { - // merge them - List mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates)); - for (DynamicTemplate template : mergeWithObject.dynamicTemplates) { - boolean replaced = false; - for (int i = 0; i < 
mergedTemplates.size(); i++) { - if (mergedTemplates.get(i).name().equals(template.name())) { - mergedTemplates.set(i, template); - replaced = true; - } - } - if (!replaced) { - mergedTemplates.add(template); + // merge them + List mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates)); + for (DynamicTemplate template : mergeWithObject.dynamicTemplates) { + boolean replaced = false; + for (int i = 0; i < mergedTemplates.size(); i++) { + if (mergedTemplates.get(i).name().equals(template.name())) { + mergedTemplates.set(i, template); + replaced = true; } } - this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]); + if (!replaced) { + mergedTemplates.add(template); + } } + this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index ac46f6725de..88e55600bc9 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -110,7 +110,7 @@ public class TranslogRecoveryPerformer { if (currentUpdate == null) { recoveredTypes.put(type, update); } else { - MapperUtils.merge(currentUpdate, update); + currentUpdate = currentUpdate.merge(update, false); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java index d94ae2b6735..2fe0cf9f218 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import 
org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; @@ -39,7 +38,6 @@ import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; -import java.util.Arrays; import java.util.List; import java.util.Map; @@ -321,9 +319,7 @@ public class CopyToMapperTests extends ESSingleNodeTestCase { DocumentMapper docMapperAfter = parser.parse(mappingAfter); - MergeResult mergeResult = docMapperBefore.merge(docMapperAfter.mapping(), true, false); - - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapperBefore.merge(docMapperAfter.mapping(), true, false); docMapperBefore.merge(docMapperAfter.mapping(), false, false); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java index ba9303e8b58..1cb41480cb7 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java @@ -23,7 +23,6 @@ import org.apache.lucene.analysis.*; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; @@ -60,13 +59,11 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase { .endObject().endObject().string(); DocumentMapper stage2 = 
parser.parse(stage2Mapping); - MergeResult mergeResult = stage1.merge(stage2.mapping(), true, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); + stage1.merge(stage2.mapping(), true, false); // Just simulated so merge hasn't happened yet assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword")); - mergeResult = stage1.merge(stage2.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); + stage1.merge(stage2.mapping(), false, false); // Just simulated so merge hasn't happened yet assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("standard")); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java index fb67401e334..4772958bdb7 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java @@ -371,9 +371,8 @@ public class SimpleDateMappingTests extends ESSingleNodeTestCase { Map config = getConfigurationViaXContent(initialDateFieldMapper); assertThat(config.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy")); - MergeResult mergeResult = defaultMapper.merge(mergeMapper.mapping(), false, false); + defaultMapper.merge(mergeMapper.mapping(), false, false); - assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.buildConflicts()), mergeResult.hasConflicts(), is(false)); assertThat(defaultMapper.mappers().getMapper("field"), is(instanceOf(DateFieldMapper.class))); DateFieldMapper mergedFieldMapper = (DateFieldMapper) defaultMapper.mappers().getMapper("field"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java 
b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java index e5d08db8d9f..d07bf21a4be 100755 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java @@ -34,7 +34,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.BinaryFieldMapper; import org.elasticsearch.index.mapper.core.BooleanFieldMapper; @@ -219,7 +218,7 @@ public class ExternalMapper extends FieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // ignore this for now } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java index dae8bc67fda..2731e30a84e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java @@ -28,7 +28,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.BooleanFieldMapper; @@ -66,9 +65,9 @@ public class ExternalMetadataMapper extends MetadataFieldMapper { } 
@Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + public void doMerge(Mapper mergeWith, boolean updateAllTypes) { if (!(mergeWith instanceof ExternalMetadataMapper)) { - mergeResult.addConflict("Trying to merge " + mergeWith + " with " + this); + throw new IllegalArgumentException("Trying to merge " + mergeWith + " with " + this); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java index 93fd71599c4..4efa12fca00 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.test.ESSingleNodeTestCase; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java index 54e9e96f8ad..596efdcc273 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java @@ -30,17 +30,13 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; -import 
java.util.ArrayList; -import java.util.Arrays; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.isIn; public class GeoShapeFieldMapperTests extends ESSingleNodeTestCase { public void testDefaultConfiguration() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java index 1a66879c448..b2faf44e657 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; @@ -39,6 +38,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -59,15 +59,12 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { .endObject().endObject().endObject().string(); DocumentMapper stage2 = parser.parse(stage2Mapping); - MergeResult mergeResult = stage1.merge(stage2.mapping(), true, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); + stage1.merge(stage2.mapping(), true, false); // since we are simulating, we should not have the age mapping 
assertThat(stage1.mappers().smartNameFieldMapper("age"), nullValue()); assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), nullValue()); // now merge, don't simulate - mergeResult = stage1.merge(stage2.mapping(), false, false); - // there is still merge failures - assertThat(mergeResult.hasConflicts(), equalTo(false)); + stage1.merge(stage2.mapping(), false, false); // but we have the age in assertThat(stage1.mappers().smartNameFieldMapper("age"), notNullValue()); assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), notNullValue()); @@ -83,8 +80,7 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { DocumentMapper withDynamicMapper = parser.parse(withDynamicMapping); assertThat(withDynamicMapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); - MergeResult mergeResult = mapper.merge(withDynamicMapper.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); + mapper.merge(withDynamicMapper.mapping(), false, false); assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); } @@ -99,14 +95,19 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { .endObject().endObject().endObject().string(); DocumentMapper nestedMapper = parser.parse(nestedMapping); - MergeResult mergeResult = objectMapper.merge(nestedMapper.mapping(), true, false); - assertThat(mergeResult.hasConflicts(), equalTo(true)); - assertThat(mergeResult.buildConflicts().length, equalTo(1)); - assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from non-nested to nested")); + try { + objectMapper.merge(nestedMapper.mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from non-nested to nested")); + } - mergeResult = nestedMapper.merge(objectMapper.mapping(), true, false); - assertThat(mergeResult.buildConflicts().length, equalTo(1)); - 
assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from nested to non-nested")); + try { + nestedMapper.merge(objectMapper.mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from nested to non-nested")); + } } public void testMergeSearchAnalyzer() throws Exception { @@ -122,9 +123,8 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { DocumentMapper changed = parser.parse(mapping2); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace")); - MergeResult mergeResult = existing.merge(changed.mapping(), false, false); + existing.merge(changed.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("keyword")); } @@ -141,9 +141,8 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { DocumentMapper changed = parser.parse(mapping2); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace")); - MergeResult mergeResult = existing.merge(changed.mapping(), false, false); + existing.merge(changed.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("standard")); assertThat(((StringFieldMapper) (existing.mappers().getMapper("field"))).getIgnoreAbove(), equalTo(14)); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java index 30890dcd22a..83e10bd826c 100644 --- 
a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java @@ -27,15 +27,11 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.test.ESSingleNodeTestCase; -import java.util.Arrays; - import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -62,8 +58,7 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json"); DocumentMapper docMapper2 = parser.parse(mapping); - MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), true, false); - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapper.merge(docMapper2.mapping(), true, false); docMapper.merge(docMapper2.mapping(), false, false); @@ -84,8 +79,7 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json"); DocumentMapper docMapper3 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper3.mapping(), true, false); - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapper.merge(docMapper3.mapping(), true, false); docMapper.merge(docMapper3.mapping(), false, false); @@ -100,8 +94,7 @@ 
public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json"); DocumentMapper docMapper4 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper4.mapping(), true, false); - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapper.merge(docMapper4.mapping(), true, false); docMapper.merge(docMapper4.mapping(), false, false); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java index 364e9f2063f..c30ea9bc6c6 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java @@ -34,11 +34,9 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.VersionUtils; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class DefaultSourceMappingTests extends ESSingleNodeTestCase { @@ -194,13 +192,18 @@ public class DefaultSourceMappingTests extends ESSingleNodeTestCase { void assertConflicts(String mapping1, String mapping2, DocumentMapperParser parser, String... 
conflicts) throws IOException { DocumentMapper docMapper = parser.parse(mapping1); docMapper = parser.parse(docMapper.mappingSource().string()); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true, false); - - List expectedConflicts = new ArrayList<>(Arrays.asList(conflicts)); - for (String conflict : mergeResult.buildConflicts()) { - assertTrue("found unexpected conflict [" + conflict + "]", expectedConflicts.remove(conflict)); + if (conflicts.length == 0) { + docMapper.merge(parser.parse(mapping2).mapping(), true, false); + } else { + try { + docMapper.merge(parser.parse(mapping2).mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + for (String conflict : conflicts) { + assertThat(e.getMessage(), containsString(conflict)); + } + } } - assertTrue("missing conflicts: " + Arrays.toString(expectedConflicts.toArray()), expectedConflicts.isEmpty()); } public void testEnabledNotUpdateable() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java index 9ac039a49fb..cadd9dd673c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java @@ -40,7 +40,6 @@ import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.core.StringFieldMapper; @@ -493,8 +492,7 @@ public class SimpleStringMappingTests extends ESSingleNodeTestCase { String updatedMapping = 
XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "string").startObject("norms").field("enabled", false).endObject() .endObject().endObject().endObject().endObject().string(); - MergeResult mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), false, false); - assertFalse(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts()); + defaultMapper.merge(parser.parse(updatedMapping).mapping(), false, false); doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() diff --git a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index 53a3bf7bb6e..d545452db0f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -42,7 +42,6 @@ import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; @@ -515,8 +514,7 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { .startObject("_timestamp").field("enabled", randomBoolean()).startObject("fielddata").field("loading", "eager").field("format", "array").endObject().field("store", "yes").endObject() .endObject().endObject().string(); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), false, false); - assertThat(mergeResult.buildConflicts().length, equalTo(0)); + 
docMapper.merge(parser.parse(mapping).mapping(), false, false); assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getLoading(), equalTo(MappedFieldType.Loading.EAGER)); assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getFormat(indexSettings), equalTo("array")); } @@ -618,9 +616,9 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { .field("index", indexValues.remove(randomInt(2))) .endObject() .endObject().endObject().string(); - DocumentMapperParser parser = createIndex("test", BWC_SETTINGS).mapperService().documentMapperParser(); + MapperService mapperService = createIndex("test", BWC_SETTINGS).mapperService(); - DocumentMapper docMapper = parser.parse(mapping); + mapperService.merge("type", new CompressedXContent(mapping), true, false); mapping = XContentFactory.jsonBuilder().startObject() .startObject("type") .startObject("_timestamp") @@ -628,18 +626,11 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { .endObject() .endObject().endObject().string(); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true, false); - List expectedConflicts = new ArrayList<>(); - expectedConflicts.add("mapper [_timestamp] has different [index] values"); - expectedConflicts.add("mapper [_timestamp] has different [tokenize] values"); - if (indexValues.get(0).equals("not_analyzed") == false) { - // if the only index value left is not_analyzed, then the doc values setting will be the same, but in the - // other two cases, it will change - expectedConflicts.add("mapper [_timestamp] has different [doc_values] values"); - } - - for (String conflict : mergeResult.buildConflicts()) { - assertThat(conflict, isIn(expectedConflicts)); + try { + mapperService.merge("type", new CompressedXContent(mapping), false, false); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("mapper [_timestamp] has different [index] values")); } } @@ -686,10 
+677,15 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { void assertConflict(String mapping1, String mapping2, DocumentMapperParser parser, String conflict) throws IOException { DocumentMapper docMapper = parser.parse(mapping1); docMapper = parser.parse(docMapper.mappingSource().string()); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true, false); - assertThat(mergeResult.buildConflicts().length, equalTo(conflict == null ? 0 : 1)); - if (conflict != null) { - assertThat(mergeResult.buildConflicts()[0], containsString(conflict)); + if (conflict == null) { + docMapper.merge(parser.parse(mapping2).mapping(), true, false); + } else { + try { + docMapper.merge(parser.parse(mapping2).mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString(conflict)); + } } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java index efe07615532..444d692079a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.internal.TTLFieldMapper; @@ -116,9 +115,8 @@ public class TTLMappingTests extends ESSingleNodeTestCase { DocumentMapper mapperWithoutTtl = parser.parse(mappingWithoutTtl); DocumentMapper mapperWithTtl = parser.parse(mappingWithTtl); - MergeResult mergeResult = mapperWithoutTtl.merge(mapperWithTtl.mapping(), false, 
false); + mapperWithoutTtl.merge(mapperWithTtl.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(mapperWithoutTtl.TTLFieldMapper().enabled(), equalTo(true)); } @@ -141,9 +139,8 @@ public class TTLMappingTests extends ESSingleNodeTestCase { DocumentMapper initialMapper = parser.parse(mappingWithTtl); DocumentMapper updatedMapper = parser.parse(updatedMapping); - MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true, false); + initialMapper.merge(updatedMapper.mapping(), true, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true)); } @@ -154,9 +151,13 @@ public class TTLMappingTests extends ESSingleNodeTestCase { DocumentMapper initialMapper = parser.parse(mappingWithTtl); DocumentMapper updatedMapper = parser.parse(mappingWithTtlDisabled); - MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true, false); + try { + initialMapper.merge(updatedMapper.mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + // expected + } - assertThat(mergeResult.hasConflicts(), equalTo(true)); assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true)); } @@ -189,23 +190,20 @@ public class TTLMappingTests extends ESSingleNodeTestCase { public void testNoConflictIfNothingSetAndDisabledLater() throws Exception { IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type"); XContentBuilder mappingWithTtlDisabled = getMappingWithTtlDisabled("7d"); - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDisabled.string()), true).mapping(), randomBoolean(), false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new 
CompressedXContent(mappingWithTtlDisabled.string()), true).mapping(), randomBoolean(), false); } public void testNoConflictIfNothingSetAndEnabledLater() throws Exception { IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type"); XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), randomBoolean(), false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), randomBoolean(), false); } public void testMergeWithOnlyDefaultSet() throws Exception { XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtlEnabled); XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m"); - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false); CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":360000},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); } @@ -216,8 +214,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { CompressedXContent 
mappingAfterCreation = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterCreation, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m"); - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false); CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); } @@ -228,8 +225,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtl); CompressedXContent mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); XContentBuilder mappingWithTtlDifferentDefault = getMappingWithTtlEnabled("7d"); - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDifferentDefault.string()), true).mapping(), true, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDifferentDefault.string()), true).mapping(), true, false); // make sure simulate flag actually worked - no mappings applied CompressedXContent 
mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge)); @@ -240,8 +236,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled(); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true, false); // make sure simulate flag actually worked - no mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge)); @@ -252,8 +247,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true, false); // make sure simulate flag actually worked - no mappings applied mappingAfterMerge = 
indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge)); @@ -263,8 +257,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { mappingWithoutTtl = getMappingWithTtlDisabled("6d"); indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false); // make sure simulate flag actually worked - mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); @@ -273,8 +266,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { // check if switching simulate flag off works if nothing was applied in the beginning indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type"); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false); // make sure simulate flag actually worked - mappings applied 
mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java index abf5f4819cd..e843088c545 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -77,9 +76,7 @@ public class UpdateMappingTests extends ESSingleNodeTestCase { private void testNoConflictWhileMergingAndMappingChanged(XContentBuilder mapping, XContentBuilder mappingUpdate, XContentBuilder expectedMapping) throws IOException { IndexService indexService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping); // simulate like in MetaDataMappingService#putMapping - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), false, false); - // assure we have no conflicts - assertThat(mergeResult.buildConflicts().length, equalTo(0)); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), false, false); // make sure mappings applied CompressedXContent 
mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterUpdate.toString(), equalTo(expectedMapping.string())); @@ -101,9 +98,12 @@ public class UpdateMappingTests extends ESSingleNodeTestCase { IndexService indexService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping); CompressedXContent mappingBeforeUpdate = indexService.mapperService().documentMapper("type").mappingSource(); // simulate like in MetaDataMappingService#putMapping - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), true, false); - // assure we have conflicts - assertThat(mergeResult.buildConflicts().length, equalTo(1)); + try { + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + // expected + } // make sure simulate flag actually worked - no mappings applied CompressedXContent mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterUpdate, equalTo(mappingBeforeUpdate)); diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java index 4be2b36fbe6..63c142f1e74 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java @@ -1176,7 +1176,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { .endObject().endObject()).get(); fail(); } catch (IllegalArgumentException e) { - assertThat(e.toString(), containsString("Merge failed with failures {[The _parent field's type option can't be changed: [null]->[parent]")); + 
assertThat(e.toString(), containsString("The _parent field's type option can't be changed: [null]->[parent]")); } } diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java index eb0e143c946..0ba636db72b 100644 --- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java +++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java @@ -602,7 +602,7 @@ public class AttachmentMapper extends FieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // ignore this for now } diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index 60c31c3f765..03b00d2ac39 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -66,8 +66,7 @@ public class Murmur3FieldMapper extends LongFieldMapper { Murmur3FieldMapper fieldMapper = new Murmur3FieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (Murmur3FieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index aaf46553a75..fb5d47bdf69 100644 --- 
a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -28,7 +28,6 @@ import org.elasticsearch.index.analysis.NumericIntegerAnalyzer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.IntegerFieldMapper; @@ -177,12 +176,10 @@ public class SizeFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { SizeFieldMapper sizeFieldMapperMergeWith = (SizeFieldMapper) mergeWith; - if (!mergeResult.simulate()) { - if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) { - this.enabledState = sizeFieldMapperMergeWith.enabledState; - } + if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) { + this.enabledState = sizeFieldMapperMergeWith.enabledState; } } } From 5d5c6591aac41fe32b7e26d66756f466d4746d10 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 4 Dec 2015 16:40:37 +0100 Subject: [PATCH 061/167] Validate that fields are defined only once. There are two ways that a field can be defined twice: - by reusing the name of a meta mapper in the root object (`_id`, `_routing`, etc.) - by defining a sub-field both explicitly in the mapping and through the code in a field mapper (like ExternalMapper does) This commit adds new checks in order to make sure this never happens. 
Close #15057 --- .../index/mapper/MapperService.java | 47 +++++++++++++++++-- .../elasticsearch/index/mapper/Mapping.java | 8 +++- .../ExternalValuesMapperIntegrationIT.java | 4 +- .../mapper/update/UpdateMappingTests.java | 45 ++++++++++++++++++ .../messy/tests/SearchFieldsTests.java | 6 +-- 5 files changed, 98 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 938f610d6db..3a38c465d3d 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -32,7 +32,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.search.Queries; @@ -92,7 +91,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { private final ReleasableLock mappingWriteLock = new ReleasableLock(mappingLock.writeLock()); private volatile FieldTypeLookup fieldTypes; - private volatile ImmutableOpenMap fullPathObjectMappers = ImmutableOpenMap.of(); + private volatile Map fullPathObjectMappers = new HashMap<>(); private boolean hasNested = false; // updated dynamically to true when a nested object is added private final DocumentMapperParser documentParser; @@ -300,8 +299,41 @@ public class MapperService extends AbstractIndexComponent implements Closeable { return true; } + private void checkFieldUniqueness(String type, Collection objectMappers, Collection fieldMappers) { + final Set objectFullNames = new HashSet<>(); + for (ObjectMapper objectMapper : objectMappers) { + final String fullPath = 
objectMapper.fullPath(); + if (objectFullNames.add(fullPath) == false) { + throw new IllegalArgumentException("Object mapper [" + fullPath + "] is defined twice in mapping for type [" + type + "]"); + } + } + + if (indexSettings.getIndexVersionCreated().before(Version.V_3_0_0)) { + // Before 3.0 some metadata mappers are also registered under the root object mapper + // So we avoid false positives by deduplicating mappers + // given that we check exact equality, this would still catch the case that a mapper + // is defined under the root object + Collection uniqueFieldMappers = Collections.newSetFromMap(new IdentityHashMap<>()); + uniqueFieldMappers.addAll(fieldMappers); + fieldMappers = uniqueFieldMappers; + } + + final Set fieldNames = new HashSet<>(); + for (FieldMapper fieldMapper : fieldMappers) { + final String name = fieldMapper.name(); + if (objectFullNames.contains(name)) { + throw new IllegalArgumentException("Field [" + name + "] is defined both as an object and a field in [" + type + "]"); + } else if (fieldNames.add(name) == false) { + throw new IllegalArgumentException("Field [" + name + "] is defined twice in [" + type + "]"); + } + } + } + protected void checkMappersCompatibility(String type, Collection objectMappers, Collection fieldMappers, boolean updateAllTypes) { assert mappingLock.isWriteLockedByCurrentThread(); + + checkFieldUniqueness(type, objectMappers, fieldMappers); + for (ObjectMapper newObjectMapper : objectMappers) { ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath()); if (existingObjectMapper != null) { @@ -313,6 +345,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } } } + + for (FieldMapper fieldMapper : fieldMappers) { + if (fullPathObjectMappers.containsKey(fieldMapper.name())) { + throw new IllegalArgumentException("Field [{}] is defined as a field in mapping [" + fieldMapper.name() + "] but this name is already used for an object in other types"); + } + 
} + fieldTypes.checkCompatibility(type, fieldMappers, updateAllTypes); } @@ -330,14 +369,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable { protected void addMappers(String type, Collection objectMappers, Collection fieldMappers) { assert mappingLock.isWriteLockedByCurrentThread(); - ImmutableOpenMap.Builder fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers); + Map fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers); for (ObjectMapper objectMapper : objectMappers) { fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper); if (objectMapper.nested().isNested()) { hasNested = true; } } - this.fullPathObjectMappers = fullPathObjectMappers.build(); + this.fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers); this.fieldTypes = this.fieldTypes.copyAndAddAll(type, fieldMappers); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java index bac42162552..a89171e4f29 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -27,10 +27,12 @@ import org.elasticsearch.index.mapper.object.RootObjectMapper; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.List; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -41,7 +43,9 @@ import static java.util.Collections.unmodifiableMap; */ public final class Mapping implements ToXContent { - public static final List LEGACY_INCLUDE_IN_OBJECT = Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl"); + // Set of fields that were included into the root object mapper before 2.0 + public static final Set 
LEGACY_INCLUDE_IN_OBJECT = Collections.unmodifiableSet(new HashSet<>( + Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl"))); final Version indexCreated; final RootObjectMapper root; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java index 4cf7b405217..7e519c3b722 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java @@ -87,7 +87,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase { .startObject("f") .field("type", ExternalMapperPlugin.EXTERNAL_UPPER) .startObject("fields") - .startObject("f") + .startObject("g") .field("type", "string") .field("store", "yes") .startObject("fields") @@ -107,7 +107,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase { refresh(); SearchResponse response = client().prepareSearch("test-idx") - .setQuery(QueryBuilders.termQuery("f.f.raw", "FOO BAR")) + .setQuery(QueryBuilders.termQuery("f.g.raw", "FOO BAR")) .execute().actionGet(); assertThat(response.getHits().totalHits(), equalTo((long) 1)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java index abf5f4819cd..fbcef46d4e6 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java @@ -202,6 +202,51 @@ public class UpdateMappingTests extends ESSingleNodeTestCase { assertNull(mapperService.documentMapper("type2").mapping().root().getMapper("foo")); } + public void testReuseMetaField() throws IOException { + XContentBuilder mapping = 
XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("_id").field("type", "string").endObject() + .endObject().endObject().endObject(); + MapperService mapperService = createIndex("test", Settings.settingsBuilder().build()).mapperService(); + + try { + mapperService.merge("type", new CompressedXContent(mapping.string()), false, false); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]")); + } + + try { + mapperService.merge("type", new CompressedXContent(mapping.string()), false, false); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]")); + } + } + + public void testReuseMetaFieldBackCompat() throws IOException { + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("_id").field("type", "string").endObject() + .endObject().endObject().endObject(); + // the logic is different for 2.x indices since they record some meta mappers (including _id) + // in the root object + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_1_0).build(); + MapperService mapperService = createIndex("test", settings).mapperService(); + + try { + mapperService.merge("type", new CompressedXContent(mapping.string()), false, false); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]")); + } + + try { + mapperService.merge("type", new CompressedXContent(mapping.string()), false, false); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]")); + } + } + public void testIndexFieldParsingBackcompat() throws IOException { IndexService indexService = createIndex("test", 
Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build()); XContentBuilder indexMapping = XContentFactory.jsonBuilder(); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java index 8153d207b7c..5a56e0f6999 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java @@ -392,8 +392,7 @@ public class SearchFieldsTests extends ESIntegTestCase { createIndex("test"); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet(); - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("_source").field("enabled", false).endObject() + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_source").field("enabled", false).endObject().startObject("properties") .startObject("byte_field").field("type", "byte").field("store", "yes").endObject() .startObject("short_field").field("type", "short").field("store", "yes").endObject() .startObject("integer_field").field("type", "integer").field("store", "yes").endObject() @@ -556,8 +555,7 @@ public class SearchFieldsTests extends ESIntegTestCase { createIndex("test"); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet(); - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("_source").field("enabled", false).endObject() + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_source").field("enabled", false).endObject().startObject("properties") 
.startObject("string_field").field("type", "string").endObject() .startObject("byte_field").field("type", "byte").endObject() .startObject("short_field").field("type", "short").endObject() From 061446b25ac21fe5b5352a4a1c8a4b042f81575b Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Tue, 15 Dec 2015 10:56:53 +0100 Subject: [PATCH 062/167] Merge pull request #15304 from cjohansen/patch-1 Fix typo --- docs/reference/mapping/types/nested.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 07f87037b07..cca87853d83 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -66,7 +66,7 @@ GET my_index/_search ==== Using `nested` fields for arrays of objects If you need to index arrays of objects and to maintain the independence of -each object in the array, you should used the `nested` datatype instead of the +each object in the array, you should use the `nested` datatype instead of the <> datatype. Internally, nested objects index each object in the array as a separate hidden document, meaning that each nested object can be queried independently of the others, with the <>: From 76713611e9d62eb9e53a403077ecfbcc907a32d3 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 11 Dec 2015 15:54:52 +0100 Subject: [PATCH 063/167] Format the term with the formatter in LongTerms.Bucket.getKeyAsString in order to be consistent with the json response field `key_as_string` of the terms aggregation. 
--- .../search/aggregations/bucket/terms/LongTerms.java | 2 +- .../search/aggregations/bucket/BooleanTermsIT.java | 12 ++++++++---- .../search/aggregations/bucket/NestedIT.java | 8 ++++---- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index 26c2eee2f6b..c270517cd9d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -91,7 +91,7 @@ public class LongTerms extends InternalTerms { @Override public String getKeyAsString() { - return String.valueOf(term); + return formatter.format(term); } @Override diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java index 0a660b85374..aad2c9bb3ed 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java @@ -99,20 +99,22 @@ public class BooleanTermsIT extends ESIntegTestCase { final int bucketCount = numSingleFalses > 0 && numSingleTrues > 0 ? 2 : numSingleFalses + numSingleTrues > 0 ? 
1 : 0; assertThat(terms.getBuckets().size(), equalTo(bucketCount)); - Terms.Bucket bucket = terms.getBucketByKey("0"); + Terms.Bucket bucket = terms.getBucketByKey("false"); if (numSingleFalses == 0) { assertNull(bucket); } else { assertNotNull(bucket); assertEquals(numSingleFalses, bucket.getDocCount()); + assertEquals("false", bucket.getKeyAsString()); } - bucket = terms.getBucketByKey("1"); + bucket = terms.getBucketByKey("true"); if (numSingleTrues == 0) { assertNull(bucket); } else { assertNotNull(bucket); assertEquals(numSingleTrues, bucket.getDocCount()); + assertEquals("true", bucket.getKeyAsString()); } } @@ -131,20 +133,22 @@ public class BooleanTermsIT extends ESIntegTestCase { final int bucketCount = numMultiFalses > 0 && numMultiTrues > 0 ? 2 : numMultiFalses + numMultiTrues > 0 ? 1 : 0; assertThat(terms.getBuckets().size(), equalTo(bucketCount)); - Terms.Bucket bucket = terms.getBucketByKey("0"); + Terms.Bucket bucket = terms.getBucketByKey("false"); if (numMultiFalses == 0) { assertNull(bucket); } else { assertNotNull(bucket); assertEquals(numMultiFalses, bucket.getDocCount()); + assertEquals("false", bucket.getKeyAsString()); } - bucket = terms.getBucketByKey("1"); + bucket = terms.getBucketByKey("true"); if (numMultiTrues == 0) { assertNull(bucket); } else { assertNotNull(bucket); assertEquals(numMultiTrues, bucket.getDocCount()); + assertEquals("true", bucket.getKeyAsString()); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index a1f4b20dc1c..349b61fc37e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -423,10 +423,10 @@ public class NestedIT extends ESIntegTestCase { Terms startDate = response.getAggregations().get("startDate"); assertThat(startDate.getBuckets().size(), equalTo(2)); - 
Terms.Bucket bucket = startDate.getBucketByKey("1414800000000"); // 2014-11-01T00:00:00.000Z + Terms.Bucket bucket = startDate.getBucketByKey("2014-11-01T00:00:00.000Z"); assertThat(bucket.getDocCount(), equalTo(1l)); Terms endDate = bucket.getAggregations().get("endDate"); - bucket = endDate.getBucketByKey("1417305600000"); // 2014-11-30T00:00:00.000Z + bucket = endDate.getBucketByKey("2014-11-30T00:00:00.000Z"); assertThat(bucket.getDocCount(), equalTo(1l)); Terms period = bucket.getAggregations().get("period"); bucket = period.getBucketByKey("2014-11"); @@ -440,10 +440,10 @@ public class NestedIT extends ESIntegTestCase { Terms tags = nestedTags.getAggregations().get("tag"); assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty - bucket = startDate.getBucketByKey("1417392000000"); // 2014-12-01T00:00:00.000Z + bucket = startDate.getBucketByKey("2014-12-01T00:00:00.000Z"); assertThat(bucket.getDocCount(), equalTo(1l)); endDate = bucket.getAggregations().get("endDate"); - bucket = endDate.getBucketByKey("1419984000000"); // 2014-12-31T00:00:00.000Z + bucket = endDate.getBucketByKey("2014-12-31T00:00:00.000Z"); assertThat(bucket.getDocCount(), equalTo(1l)); period = bucket.getAggregations().get("period"); bucket = period.getBucketByKey("2014-12"); From 82eb498b29c8372801fc013f041c4145e57752cb Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Tue, 15 Dec 2015 12:27:20 +0100 Subject: [PATCH 064/167] Docs: Updated plugin author help for Gradle Relates to #15280 --- docs/plugins/authors.asciidoc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/plugins/authors.asciidoc b/docs/plugins/authors.asciidoc index 75b7776ec09..9461ba8dd53 100644 --- a/docs/plugins/authors.asciidoc +++ b/docs/plugins/authors.asciidoc @@ -17,7 +17,7 @@ listed in this documentation for inspiration. 
==================================== The example site plugin mentioned above contains all of the scaffolding needed -for integrating with Maven builds. If you don't plan on using Maven, then all +for integrating with Gradle builds. If you don't plan on using Gradle, then all you really need in your plugin is: * The `plugin-descriptor.properties` file @@ -33,14 +33,14 @@ All plugins, be they site or Java plugins, must contain a file called `plugin-descriptor.properties` in the root directory. The format for this file is described in detail here: -https://github.com/elastic/elasticsearch/blob/master/dev-tools/src/main/resources/plugin-metadata/plugin-descriptor.properties[`dev-tools/src/main/resources/plugin-metadata/plugin-descriptor.properties`]. +https://github.com/elastic/elasticsearch/blob/master/buildSrc/src/main/resources/plugin-descriptor.properties[`/buildSrc/src/main/resources/plugin-descriptor.properties`]. Either fill in this template yourself (see https://github.com/lmenezes/elasticsearch-kopf/blob/master/plugin-descriptor.properties[elasticsearch-kopf] -as an example) or, if you are using Elasticsearch's Maven build system, you -can fill in the necessary values in the `pom.xml` for your plugin. For +as an example) or, if you are using Elasticsearch's Gradle build system, you +can fill in the necessary values in the `build.gradle` file for your plugin. For instance, see -https://github.com/elastic/elasticsearch/blob/master/plugins/site-example/pom.xml[`plugins/site-example/pom.xml`]. +https://github.com/elastic/elasticsearch/blob/master/plugins/site-example/build.gradle[`/plugins/site-example/build.gradle`]. [float] ==== Mandatory elements for all plugins @@ -123,13 +123,13 @@ Read more in {ref}/integration-tests.html#changing-node-configuration[Changing N === Java Security permissions Some plugins may need additional security permissions. 
A plugin can include -the optional `plugin-security.policy` file containing `grant` statements for -additional permissions. Any additional permissions will be displayed to the user -with a large warning, and they will have to confirm them when installing the +the optional `plugin-security.policy` file containing `grant` statements for +additional permissions. Any additional permissions will be displayed to the user +with a large warning, and they will have to confirm them when installing the plugin interactively. So if possible, it is best to avoid requesting any spurious permissions! -If you are using the elasticsearch Maven build system, place this file in +If you are using the elasticsearch Gradle build system, place this file in `src/main/plugin-metadata` and it will be applied during unit tests as well. Keep in mind that the Java security model is stack-based, and the additional From 4426ed0a097e1a15df494e61087624540bde94a6 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Tue, 15 Dec 2015 14:51:42 +0200 Subject: [PATCH 065/167] [DOCS] Link docs on repository-hdfs plugin relates #15191 --- docs/plugins/repository-hdfs.asciidoc | 7 ++++--- docs/plugins/repository.asciidoc | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc index ea13e5ad3a6..6a90859c0ef 100644 --- a/docs/plugins/repository-hdfs.asciidoc +++ b/docs/plugins/repository-hdfs.asciidoc @@ -8,7 +8,7 @@ The HDFS repository plugin adds support for using HDFS File System as a reposito [float] ==== Installation -This plugin can be installed using the plugin manager: +This plugin can be installed using the plugin manager using _one_ of the following packages: [source,sh] ---------------------------------------------------------------- @@ -17,14 +17,14 @@ sudo bin/plugin install repository-hdfs-hadoop2 sudo bin/plugin install repository-hdfs-lite ---------------------------------------------------------------- 
-The plugin must be installed on every node in the cluster, and each node must +The chosen plugin must be installed on every node in the cluster, and each node must be restarted after installation. [[repository-hdfs-remove]] [float] ==== Removal -The plugin can be removed with the following command: +The plugin can be removed by specifying the _installed_ package using _one_ of the following commands: [source,sh] ---------------------------------------------------------------- @@ -76,6 +76,7 @@ additional permissions which are not provided by the plugin. In this case there are several workarounds: * add the permission into `plugin-security.policy` (available in the plugin folder) + * disable the security manager through `es.security.manager.enabled=false` configurations setting - NOT RECOMMENDED If you find yourself in such a situation, please let us know what Hadoop distro version and OS you are using and what permission is missing diff --git a/docs/plugins/repository.asciidoc b/docs/plugins/repository.asciidoc index 554fa34b033..5706fc74c12 100644 --- a/docs/plugins/repository.asciidoc +++ b/docs/plugins/repository.asciidoc @@ -18,10 +18,9 @@ The S3 repository plugin adds support for using S3 as a repository. The Azure repository plugin adds support for using Azure as a repository. -https://github.com/elastic/elasticsearch-hadoop/tree/master/repository-hdfs[Hadoop HDFS Repository]:: +<>:: -The Hadoop HDFS Repository plugin adds support for using an HDFS file system -as a repository. +The Hadoop HDFS Repository plugin adds support for using HDFS as a repository. [float] @@ -40,3 +39,5 @@ include::repository-azure.asciidoc[] include::repository-s3.asciidoc[] +include::repository-hdfs.asciidoc[] + From d94bba2d9c8a2a717daaf79058c4b16e398cb3eb Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 15 Dec 2015 14:55:23 +0100 Subject: [PATCH 066/167] Remove back compat for the `path` option. 
The `path` option allowed to index/store a field `a.b.c` under just `c` when set to `just_name`. This "feature" has been removed in 2.0 in favor of `copy_to` so we can remove the back compat in 3.x. --- .../index/mapper/ContentPath.java | 29 ++----------- .../index/mapper/DocumentParser.java | 14 +++---- .../index/mapper/FieldMapper.java | 40 +++--------------- .../index/mapper/core/TypeParsers.java | 31 ++------------ .../mapper/geo/BaseGeoPointFieldMapper.java | 28 ++----------- .../index/mapper/geo/GeoPointFieldMapper.java | 9 ++-- .../mapper/geo/GeoPointFieldMapperLegacy.java | 9 ++-- .../index/mapper/object/DynamicTemplate.java | 4 +- .../index/mapper/object/ObjectMapper.java | 41 +++---------------- .../index/mapper/object/RootObjectMapper.java | 8 ++-- .../mapper/externalvalues/ExternalMapper.java | 4 -- .../mapper/attachments/AttachmentMapper.java | 28 ++----------- 12 files changed, 42 insertions(+), 203 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java b/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java index 47c43720162..54c6ef20e3e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java @@ -19,16 +19,9 @@ package org.elasticsearch.index.mapper; -public class ContentPath { +public final class ContentPath { - public enum Type { - JUST_NAME, - FULL, - } - - private Type pathType; - - private final char delimiter; + private static final char DELIMITER = '.'; private final StringBuilder sb; @@ -47,7 +40,6 @@ public class ContentPath { * number of path elements to not be included in {@link #pathAsText(String)}. 
*/ public ContentPath(int offset) { - this.delimiter = '.'; this.sb = new StringBuilder(); this.offset = offset; reset(); @@ -71,26 +63,11 @@ public class ContentPath { } public String pathAsText(String name) { - if (pathType == Type.JUST_NAME) { - return name; - } - return fullPathAsText(name); - } - - public String fullPathAsText(String name) { sb.setLength(0); for (int i = offset; i < index; i++) { - sb.append(path[i]).append(delimiter); + sb.append(path[i]).append(DELIMITER); } sb.append(name); return sb.toString(); } - - public Type pathType() { - return pathType; - } - - public void pathType(Type type) { - this.pathType = type; - } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 656ee2c600d..bb1749d2336 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -234,9 +234,6 @@ class DocumentParser implements Closeable { nestedDoc.add(new Field(TypeFieldMapper.NAME, mapper.nestedTypePathAsString(), TypeFieldMapper.Defaults.FIELD_TYPE)); } - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(mapper.pathType()); - // if we are at the end of the previous object, advance if (token == XContentParser.Token.END_OBJECT) { token = parser.nextToken(); @@ -272,7 +269,6 @@ class DocumentParser implements Closeable { } } // restore the enable path flag - context.path().pathType(origPathType); if (nested.isNested()) { ParseContext.Document nestedDoc = context.doc(); ParseContext.Document parentDoc = nestedDoc.getParent(); @@ -341,7 +337,7 @@ class DocumentParser implements Closeable { context.path().remove(); Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object"); if (builder == null) { - builder = MapperBuilders.object(currentFieldName).enabled(true).pathType(mapper.pathType()); + 
builder = MapperBuilders.object(currentFieldName).enabled(true); // if this is a non root object, then explicitly set the dynamic behavior if set if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) { ((ObjectMapper.Builder) builder).dynamic(mapper.dynamic()); @@ -610,7 +606,7 @@ class DocumentParser implements Closeable { return null; } final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); - final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().fullPathAsText(currentFieldName)); + final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().pathAsText(currentFieldName)); Mapper.Builder builder = null; if (existingFieldType != null) { // create a builder of the same type @@ -695,7 +691,7 @@ class DocumentParser implements Closeable { if (paths.length > 1) { ObjectMapper parent = context.root(); for (int i = 0; i < paths.length-1; i++) { - mapper = context.docMapper().objectMappers().get(context.path().fullPathAsText(paths[i])); + mapper = context.docMapper().objectMappers().get(context.path().pathAsText(paths[i])); if (mapper == null) { // One mapping is missing, check if we are allowed to create a dynamic one. 
ObjectMapper.Dynamic dynamic = parent.dynamic(); @@ -713,12 +709,12 @@ class DocumentParser implements Closeable { if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) { ((ObjectMapper.Builder) builder).dynamic(parent.dynamic()); } - builder = MapperBuilders.object(paths[i]).enabled(true).pathType(parent.pathType()); + builder = MapperBuilders.object(paths[i]).enabled(true); } Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); mapper = (ObjectMapper) builder.build(builderContext); if (mapper.nested() != ObjectMapper.Nested.NO) { - throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().fullPathAsText(paths[i]) + "]) through `copy_to`"); + throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`"); } break; case FALSE: diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 9997f8608d2..6f447cdeb86 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -207,11 +207,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable { return this; } - public T multiFieldPathType(ContentPath.Type pathType) { - multiFieldsBuilder.pathType(pathType); - return builder; - } - public T addMultiField(Mapper.Builder mapperBuilder) { multiFieldsBuilder.add(mapperBuilder); return builder; @@ -242,7 +237,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable { } protected String buildFullName(BuilderContext context) { - return context.path().fullPathAsText(name); + return context.path().pathAsText(name); } protected void setupFieldType(BuilderContext context) { @@ -540,18 +535,12 @@ public abstract class FieldMapper 
extends Mapper implements Cloneable { public static class MultiFields { public static MultiFields empty() { - return new MultiFields(ContentPath.Type.FULL, ImmutableOpenMap.of()); + return new MultiFields(ImmutableOpenMap.of()); } public static class Builder { private final ImmutableOpenMap.Builder mapperBuilders = ImmutableOpenMap.builder(); - private ContentPath.Type pathType = ContentPath.Type.FULL; - - public Builder pathType(ContentPath.Type pathType) { - this.pathType = pathType; - return this; - } public Builder add(Mapper.Builder builder) { mapperBuilders.put(builder.name(), builder); @@ -560,13 +549,9 @@ public abstract class FieldMapper extends Mapper implements Cloneable { @SuppressWarnings("unchecked") public MultiFields build(FieldMapper.Builder mainFieldBuilder, BuilderContext context) { - if (pathType == ContentPath.Type.FULL && mapperBuilders.isEmpty()) { + if (mapperBuilders.isEmpty()) { return empty(); - } else if (mapperBuilders.isEmpty()) { - return new MultiFields(pathType, ImmutableOpenMap.of()); } else { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); context.path().add(mainFieldBuilder.name()); ImmutableOpenMap.Builder mapperBuilders = this.mapperBuilders; for (ObjectObjectCursor cursor : this.mapperBuilders) { @@ -577,18 +562,15 @@ public abstract class FieldMapper extends Mapper implements Cloneable { mapperBuilders.put(key, mapper); } context.path().remove(); - context.path().pathType(origPathType); ImmutableOpenMap.Builder mappers = mapperBuilders.cast(); - return new MultiFields(pathType, mappers.build()); + return new MultiFields(mappers.build()); } } } - private final ContentPath.Type pathType; private final ImmutableOpenMap mappers; - private MultiFields(ContentPath.Type pathType, ImmutableOpenMap mappers) { - this.pathType = pathType; + private MultiFields(ImmutableOpenMap mappers) { ImmutableOpenMap.Builder builder = new ImmutableOpenMap.Builder<>(); // we disable the all in 
multi-field mappers for (ObjectObjectCursor cursor : mappers) { @@ -609,21 +591,14 @@ public abstract class FieldMapper extends Mapper implements Cloneable { context = context.createMultiFieldContext(); - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); - context.path().add(mainField.simpleName()); for (ObjectCursor cursor : mappers.values()) { cursor.value.parse(context); } context.path().remove(); - context.path().pathType(origPathType); } public MultiFields merge(MultiFields mergeWith) { - if (pathType != mergeWith.pathType) { - throw new IllegalArgumentException("Can't change path type from [" + pathType + "] to [" + mergeWith.pathType + "]"); - } ImmutableOpenMap.Builder newMappersBuilder = ImmutableOpenMap.builder(mappers); for (ObjectCursor cursor : mergeWith.mappers.values()) { @@ -642,7 +617,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable { } ImmutableOpenMap mappers = newMappersBuilder.build(); - return new MultiFields(pathType, mappers); + return new MultiFields(mappers); } public Iterator iterator() { @@ -650,9 +625,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable { } public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (pathType != ContentPath.Type.FULL) { - builder.field("path", pathType.name().toLowerCase(Locale.ROOT)); - } if (!mappers.isEmpty()) { // sort the mappers so we get consistent serialization format Mapper[] sortedMappers = mappers.values().toArray(Mapper.class); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java index e530243657c..f6bd4946eb2 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java @@ -61,7 +61,6 @@ public class TypeParsers { @Override public Mapper.Builder 
parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - ContentPath.Type pathType = null; FieldMapper.Builder mainFieldBuilder = null; List fields = null; String firstType = null; @@ -70,10 +69,7 @@ public class TypeParsers { Map.Entry entry = iterator.next(); String fieldName = Strings.toUnderscoreCase(entry.getKey()); Object fieldNode = entry.getValue(); - if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - pathType = parsePathType(name, fieldNode.toString()); - iterator.remove(); - } else if (fieldName.equals("fields")) { + if (fieldName.equals("fields")) { Map fieldsNode = (Map) fieldNode; for (Iterator> fieldsIterator = fieldsNode.entrySet().iterator(); fieldsIterator.hasNext();) { Map.Entry entry1 = fieldsIterator.next(); @@ -132,17 +128,10 @@ public class TypeParsers { } } - if (fields != null && pathType != null) { + if (fields != null) { for (Mapper.Builder field : fields) { mainFieldBuilder.addMultiField(field); } - mainFieldBuilder.multiFieldPathType(pathType); - } else if (fields != null) { - for (Mapper.Builder field : fields) { - mainFieldBuilder.addMultiField(field); - } - } else if (pathType != null) { - mainFieldBuilder.multiFieldPathType(pathType); } return mainFieldBuilder; } @@ -337,10 +326,7 @@ public class TypeParsers { public static boolean parseMultiField(FieldMapper.Builder builder, String name, Mapper.TypeParser.ParserContext parserContext, String propName, Object propNode) { parserContext = parserContext.createMultiFieldContext(parserContext); - if (propName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - builder.multiFieldPathType(parsePathType(name, propNode.toString())); - return true; - } else if (propName.equals("fields")) { + if (propName.equals("fields")) { final Map multiFieldsPropNodes; @@ -457,17 +443,6 @@ public class TypeParsers { } } - public static ContentPath.Type parsePathType(String name, String 
path) throws MapperParsingException { - path = Strings.toUnderscoreCase(path); - if ("just_name".equals(path)) { - return ContentPath.Type.JUST_NAME; - } else if ("full".equals(path)) { - return ContentPath.Type.FULL; - } else { - throw new MapperParsingException("wrong value for pathType [" + path + "] for object [" + name + "]"); - } - } - @SuppressWarnings("unchecked") public static void parseCopyFields(Object propNode, FieldMapper.Builder builder) { FieldMapper.CopyTo.Builder copyToBuilder = new FieldMapper.CopyTo.Builder(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java index 2b1d091be3b..f7910e1f5af 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -73,7 +72,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } public static class Defaults { - public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL; public static final boolean ENABLE_LATLON = false; public static final boolean ENABLE_GEOHASH = false; public static final boolean ENABLE_GEOHASH_PREFIX = false; @@ -82,7 +80,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } public abstract static class Builder extends FieldMapper.Builder { - protected ContentPath.Type pathType = Defaults.PATH_TYPE; protected 
boolean enableLatLon = Defaults.ENABLE_LATLON; @@ -105,12 +102,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr return (GeoPointFieldType)fieldType; } - @Override - public T multiFieldPathType(ContentPath.Type pathType) { - this.pathType = pathType; - return builder; - } - @Override public T fieldDataSettings(Settings settings) { this.fieldDataSettings = settings; @@ -158,13 +149,10 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } public abstract Y build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, - Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, + Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, CopyTo copyTo); public Y build(Mapper.BuilderContext context) { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); - GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType; DoubleFieldMapper latMapper = null; @@ -190,9 +178,8 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix); } context.path().remove(); - context.path().pathType(origPathType); - return build(context, name, fieldType, defaultFieldType, context.indexSettings(), origPathType, + return build(context, name, fieldType, defaultFieldType, context.indexSettings(), latMapper, lonMapper, geoHashMapper, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo); } } @@ -364,17 +351,14 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr protected final DoubleFieldMapper lonMapper; - protected final ContentPath.Type pathType; - protected final StringFieldMapper geoHashMapper; 
protected Explicit ignoreMalformed; protected BaseGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, - ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, + DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); - this.pathType = pathType; this.latMapper = latMapper; this.lonMapper = lonMapper; this.geoHashMapper = geoHashMapper; @@ -434,8 +418,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr @Override public Mapper parse(ParseContext context) throws IOException { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); context.path().add(simpleName()); GeoPoint sparse = context.parseExternalValue(GeoPoint.class); @@ -480,7 +462,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } context.path().remove(); - context.path().pathType(origPathType); return null; } @@ -505,9 +486,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); - if (includeDefaults || pathType != Defaults.PATH_TYPE) { - builder.field("path", pathType.name().toLowerCase(Locale.ROOT)); - } if (includeDefaults || fieldType().isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) { builder.field("lat_lon", fieldType().isLatLonEnabled()); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index 286aca29727..fa61669e800 100644 --- 
a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -81,12 +80,12 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper { @Override public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType, - MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, + MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, CopyTo copyTo) { fieldType.setTokenized(false); setupFieldType(context); - return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, + return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, copyTo); } @@ -104,9 +103,9 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper { } public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, - ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, + DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, CopyTo copyTo) { - super(simpleName, fieldType, defaultFieldType, indexSettings, 
pathType, latMapper, lonMapper, geoHashMapper, multiFields, + super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, copyTo); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java index 7e5a8738384..735baa88533 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -110,14 +109,14 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement @Override public GeoPointFieldMapperLegacy build(BuilderContext context, String simpleName, MappedFieldType fieldType, - MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, + MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, CopyTo copyTo) { fieldType.setTokenized(false); setupFieldType(context); fieldType.setHasDocValues(false); defaultFieldType.setHasDocValues(false); - return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, + return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, 
geoHashMapper, multiFields, ignoreMalformed, coerce(context), copyTo); } @@ -287,10 +286,10 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement protected Explicit coerce; public GeoPointFieldMapperLegacy(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, - ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, + DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, Explicit coerce, CopyTo copyTo) { - super(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, geoHashMapper, multiFields, + super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, copyTo); this.coerce = coerce; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java b/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java index c51264f3dba..58602f06dfa 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java @@ -125,13 +125,13 @@ public class DynamicTemplate { } public boolean match(ContentPath path, String name, String dynamicType) { - if (pathMatch != null && !patternMatch(pathMatch, path.fullPathAsText(name))) { + if (pathMatch != null && !patternMatch(pathMatch, path.pathAsText(name))) { return false; } if (match != null && !patternMatch(match, name)) { return false; } - if (pathUnmatch != null && patternMatch(pathUnmatch, path.fullPathAsText(name))) { + if (pathUnmatch != null && patternMatch(pathUnmatch, path.pathAsText(name))) { return false; } if (unmatch != null && patternMatch(unmatch, name)) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java 
b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index cbbc8563576..c2d9783fc9f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.CopyOnWriteHashMap; @@ -40,7 +39,6 @@ import java.util.*; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; import static org.elasticsearch.index.mapper.MapperBuilders.object; -import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType; /** * @@ -54,7 +52,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, public static final boolean ENABLED = true; public static final Nested NESTED = Nested.NO; public static final Dynamic DYNAMIC = null; // not set, inherited from root - public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL; } public static enum Dynamic { @@ -104,8 +101,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, protected Dynamic dynamic = Defaults.DYNAMIC; - protected ContentPath.Type pathType = Defaults.PATH_TYPE; - protected Boolean includeInAll; protected final List mappersBuilders = new ArrayList<>(); @@ -130,11 +125,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, return builder; } - public T pathType(ContentPath.Type pathType) { - this.pathType = pathType; - return builder; - } - public T includeInAll(boolean includeInAll) { this.includeInAll = includeInAll; return builder; @@ -147,8 +137,6 @@ public class ObjectMapper extends Mapper implements 
AllFieldMapper.IncludeInAll, @Override public Y build(BuilderContext context) { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); context.path().add(name); Map mappers = new HashMap<>(); @@ -156,17 +144,16 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, Mapper mapper = builder.build(context); mappers.put(mapper.simpleName(), mapper); } - context.path().pathType(origPathType); context.path().remove(); - ObjectMapper objectMapper = createMapper(name, context.path().fullPathAsText(name), enabled, nested, dynamic, pathType, mappers, context.indexSettings()); + ObjectMapper objectMapper = createMapper(name, context.path().pathAsText(name), enabled, nested, dynamic, mappers, context.indexSettings()); objectMapper = objectMapper.includeInAllIfNotSet(includeInAll); return (Y) objectMapper; } - protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map mappers, @Nullable Settings settings) { - return new ObjectMapper(name, fullPath, enabled, nested, dynamic, pathType, mappers); + protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map mappers, @Nullable Settings settings) { + return new ObjectMapper(name, fullPath, enabled, nested, dynamic, mappers); } } @@ -179,7 +166,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, Map.Entry entry = iterator.next(); String fieldName = Strings.toUnderscoreCase(entry.getKey()); Object fieldNode = entry.getValue(); - if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder) || parseObjectProperties(name, fieldName, fieldNode, parserContext, builder)) { + if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder)) { iterator.remove(); } } @@ -214,14 +201,6 @@ public class ObjectMapper extends Mapper implements 
AllFieldMapper.IncludeInAll, return false; } - protected static boolean parseObjectProperties(String name, String fieldName, Object fieldNode, ParserContext parserContext, ObjectMapper.Builder builder) { - if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - builder.pathType(parsePathType(name, fieldNode.toString())); - return true; - } - return false; - } - protected static void parseNested(String name, Map node, ObjectMapper.Builder builder) { boolean nested = false; boolean nestedIncludeInParent = false; @@ -326,19 +305,16 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, private volatile Dynamic dynamic; - private final ContentPath.Type pathType; - private Boolean includeInAll; private volatile CopyOnWriteHashMap mappers; - ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map mappers) { + ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map mappers) { super(name); this.fullPath = fullPath; this.enabled = enabled; this.nested = nested; this.dynamic = dynamic; - this.pathType = pathType; if (mappers == null) { this.mappers = new CopyOnWriteHashMap<>(); } else { @@ -380,10 +356,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, return this.enabled; } - public ContentPath.Type pathType() { - return pathType; - } - public Mapper getMapper(String field) { return mappers.get(field); } @@ -535,9 +507,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, if (enabled != Defaults.ENABLED) { builder.field("enabled", enabled); } - if (pathType != Defaults.PATH_TYPE) { - builder.field("path", pathType.name().toLowerCase(Locale.ROOT)); - } if (includeInAll != null) { builder.field("include_in_all", includeInAll); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java 
b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java index c6c64e432b7..2fd4e914718 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java @@ -95,7 +95,7 @@ public class RootObjectMapper extends ObjectMapper { @Override - protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map mappers, @Nullable Settings settings) { + protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map mappers, @Nullable Settings settings) { assert !nested.isNested(); FormatDateTimeFormatter[] dates = null; if (dynamicDateTimeFormatters == null) { @@ -106,7 +106,7 @@ public class RootObjectMapper extends ObjectMapper { } else { dates = dynamicDateTimeFormatters.toArray(new FormatDateTimeFormatter[dynamicDateTimeFormatters.size()]); } - return new RootObjectMapper(name, enabled, dynamic, pathType, mappers, + return new RootObjectMapper(name, enabled, dynamic, mappers, dates, dynamicTemplates.toArray(new DynamicTemplate[dynamicTemplates.size()]), dateDetection, numericDetection); @@ -196,9 +196,9 @@ public class RootObjectMapper extends ObjectMapper { private volatile DynamicTemplate dynamicTemplates[]; - RootObjectMapper(String name, boolean enabled, Dynamic dynamic, ContentPath.Type pathType, Map mappers, + RootObjectMapper(String name, boolean enabled, Dynamic dynamic, Map mappers, FormatDateTimeFormatter[] dynamicDateTimeFormatters, DynamicTemplate dynamicTemplates[], boolean dateDetection, boolean numericDetection) { - super(name, name, enabled, Nested.NO, dynamic, pathType, mappers); + super(name, name, enabled, Nested.NO, dynamic, mappers); this.dynamicTemplates = dynamicTemplates; this.dynamicDateTimeFormatters = dynamicDateTimeFormatters; this.dateDetection = dateDetection; diff --git 
a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java index d07bf21a4be..e7b7c26b84f 100755 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java @@ -95,9 +95,6 @@ public class ExternalMapper extends FieldMapper { @Override public ExternalMapper build(BuilderContext context) { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(ContentPath.Type.FULL); - context.path().add(name); BinaryFieldMapper binMapper = binBuilder.build(context); BooleanFieldMapper boolMapper = boolBuilder.build(context); @@ -107,7 +104,6 @@ public class ExternalMapper extends FieldMapper { FieldMapper stringMapper = (FieldMapper)stringBuilder.build(context); context.path().remove(); - context.path().pathType(origPathType); setupFieldType(context); return new ExternalMapper(name, fieldType, generatedValue, mapperName, binMapper, boolMapper, pointMapper, shapeMapper, stringMapper, diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java index 0ba636db72b..66ecbe0850b 100644 --- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java +++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java @@ -37,7 +37,6 @@ import java.util.*; import static org.elasticsearch.index.mapper.MapperBuilders.*; import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; -import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType; /** *

@@ -65,7 +64,6 @@ public class AttachmentMapper extends FieldMapper {
     public static final String CONTENT_TYPE = "attachment";
 
     public static class Defaults {
-        public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
 
         public static final AttachmentFieldType FIELD_TYPE = new AttachmentFieldType();
         static {
@@ -108,8 +106,6 @@ public class AttachmentMapper extends FieldMapper {
 
     public static class Builder extends FieldMapper.Builder {
 
-        private ContentPath.Type pathType = Defaults.PATH_TYPE;
-
         private Boolean ignoreErrors = null;
 
         private Integer defaultIndexedChars = null;
@@ -140,11 +136,6 @@ public class AttachmentMapper extends FieldMapper {
             this.contentBuilder = stringField(FieldNames.CONTENT);
         }
 
-        public Builder pathType(ContentPath.Type pathType) {
-            this.pathType = pathType;
-            return this;
-        }
-
         public Builder content(Mapper.Builder content) {
             this.contentBuilder = content;
             return this;
@@ -192,8 +183,6 @@ public class AttachmentMapper extends FieldMapper {
 
         @Override
         public AttachmentMapper build(BuilderContext context) {
-            ContentPath.Type origPathType = context.path().pathType();
-            context.path().pathType(pathType);
 
             FieldMapper contentMapper;
             if (context.indexCreatedVersion().before(Version.V_2_0_0_beta1)) {
@@ -220,8 +209,6 @@ public class AttachmentMapper extends FieldMapper {
             FieldMapper language = (FieldMapper) languageBuilder.build(context);
             context.path().remove();
 
-            context.path().pathType(origPathType);
-
             if (defaultIndexedChars == null && context.indexSettings() != null) {
                 defaultIndexedChars = context.indexSettings().getAsInt("index.mapping.attachment.indexed_chars", 100000);
             }
@@ -257,7 +244,7 @@ public class AttachmentMapper extends FieldMapper {
 
             defaultFieldType.freeze();
             this.setupFieldType(context);
-            return new AttachmentMapper(name, fieldType, defaultFieldType, pathType, defaultIndexedChars, ignoreErrors, langDetect, contentMapper,
+            return new AttachmentMapper(name, fieldType, defaultFieldType, defaultIndexedChars, ignoreErrors, langDetect, contentMapper,
                     dateMapper, titleMapper, nameMapper, authorMapper, keywordsMapper, contentTypeMapper, contentLength,
                     language, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
         }
@@ -309,10 +296,7 @@ public class AttachmentMapper extends FieldMapper {
                 Map.Entry entry = iterator.next();
                 String fieldName = entry.getKey();
                 Object fieldNode = entry.getValue();
-                if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
-                    builder.pathType(parsePathType(name, fieldNode.toString()));
-                    iterator.remove();
-                } else if (fieldName.equals("fields")) {
+                if (fieldName.equals("fields")) {
                     Map fieldsNode = (Map) fieldNode;
                     for (Iterator> fieldsIterator = fieldsNode.entrySet().iterator(); fieldsIterator.hasNext();) {
                         Map.Entry entry1 = fieldsIterator.next();
@@ -375,8 +359,6 @@ public class AttachmentMapper extends FieldMapper {
         }
     }
 
-    private final ContentPath.Type pathType;
-
     private final int defaultIndexedChars;
 
     private final boolean ignoreErrors;
@@ -401,13 +383,12 @@ public class AttachmentMapper extends FieldMapper {
 
     private final FieldMapper languageMapper;
 
-    public AttachmentMapper(String simpleName, MappedFieldType type, MappedFieldType defaultFieldType, ContentPath.Type pathType, int defaultIndexedChars, Boolean ignoreErrors,
+    public AttachmentMapper(String simpleName, MappedFieldType type, MappedFieldType defaultFieldType, int defaultIndexedChars, Boolean ignoreErrors,
                             Boolean defaultLangDetect, FieldMapper contentMapper,
                             FieldMapper dateMapper, FieldMapper titleMapper, FieldMapper nameMapper, FieldMapper authorMapper,
                             FieldMapper keywordsMapper, FieldMapper contentTypeMapper, FieldMapper contentLengthMapper,
                             FieldMapper languageMapper, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
         super(simpleName, type, defaultFieldType, indexSettings, multiFields, copyTo);
-        this.pathType = pathType;
         this.defaultIndexedChars = defaultIndexedChars;
         this.ignoreErrors = ignoreErrors;
         this.defaultLangDetect = defaultLangDetect;
@@ -626,9 +607,6 @@ public class AttachmentMapper extends FieldMapper {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(simpleName());
         builder.field("type", CONTENT_TYPE);
-        if (indexCreatedBefore2x) {
-            builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
-        }
 
         builder.startObject("fields");
         contentMapper.toXContent(builder, params);

From 0ce88b5887324b74969a87306a43780b732ee28f Mon Sep 17 00:00:00 2001
From: umeku 
Date: Tue, 15 Dec 2015 13:44:58 +0200
Subject: [PATCH 067/167] Fix inaccurate docs for nested datatype

Closes #15436
---
 docs/reference/mapping/types/nested.asciidoc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc
index cca87853d83..b4bb06e236c 100644
--- a/docs/reference/mapping/types/nested.asciidoc
+++ b/docs/reference/mapping/types/nested.asciidoc
@@ -110,7 +110,7 @@ GET my_index/_search
         "bool": {
           "must": [
             { "match": { "user.first": "Alice" }},
-            { "match": { "user.last":  "White" }} <2>
+            { "match": { "user.last":  "Smith" }} <2>
           ]
         }
       }
@@ -127,7 +127,7 @@ GET my_index/_search
         "bool": {
           "must": [
             { "match": { "user.first": "Alice" }},
-            { "match": { "user.last":  "Smith" }} <3>
+            { "match": { "user.last":  "White" }} <3>
           ]
         }
       },
@@ -137,14 +137,14 @@ GET my_index/_search
             "user.first": {}
           }
         }
-      }
+
     }
   }
 }
 --------------------------------------------------
 // AUTOSENSE
 <1> The `user` field is mapped as type `nested` instead of type `object`.
-<2> This query doesn't match because `Alice` and `White` are not in the same nested object.
+<2> This query doesn't match because `Alice` and `Smith` are not in the same nested object.
 <3> This query matches because `Alice` and `White` are in the same nested object.
 <4> `inner_hits` allow us to highlight the matching nested documents.
 

From 07044e02b99a8453b099ca690337e19a771f232f Mon Sep 17 00:00:00 2001
From: Boaz Leskes 
Date: Tue, 15 Dec 2015 15:25:17 +0100
Subject: [PATCH 068/167] IndexService: remove unneeded @Inject annotation from constructor

---
 core/src/main/java/org/elasticsearch/index/IndexService.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java
index 92ca00231b5..829d85f3a96 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexService.java
@@ -93,7 +93,6 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
     private final AtomicBoolean deleted = new AtomicBoolean(false);
     private final IndexSettings indexSettings;
 
-    @Inject
     public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv,
                         SimilarityService similarityService,
                         ShardStoreDeleter shardStoreDeleter,

From 210657a453290f8eb0210822517bab86cb103592 Mon Sep 17 00:00:00 2001
From: Costin Leau 
Date: Tue, 15 Dec 2015 16:44:27 +0200
Subject: [PATCH 069/167] [DOC] escape # in programlisting

---
 docs/plugins/repository-hdfs.asciidoc | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc
index 6a90859c0ef..114dbf13035 100644
--- a/docs/plugins/repository-hdfs.asciidoc
+++ b/docs/plugins/repository-hdfs.asciidoc
@@ -88,24 +88,26 @@ by raising an issue. Thank you!
 Once installed, define the configuration for the `hdfs` repository through `elasticsearch.yml` or the
 {ref}/modules-snapshots.html[REST API]:
 
-[source]
+[source,yaml]
 ----
 repositories
   hdfs:
-    uri: "hdfs://:/"    # optional - Hadoop file-system URI
-    path: "some/path"               # required - path with the file-system where data is stored/loaded
-    load_defaults: "true"           # optional - whether to load the default Hadoop configuration (default) or not
-    conf_location: "extra-cfg.xml"  # optional - Hadoop configuration XML to be loaded (use commas for multi values)
-    conf. : ""          # optional - 'inlined' key=value added to the Hadoop configuration
-    concurrent_streams: 5           # optional - the number of concurrent streams (defaults to 5)
-    compress: "false"               # optional - whether to compress the metadata or not (default)
-    chunk_size: "10mb"              # optional - chunk size (disabled by default)
+    uri: "hdfs://:/"    \# optional - Hadoop file-system URI
+    path: "some/path"               \# required - path with the file-system where data is stored/loaded
+    load_defaults: "true"           \# optional - whether to load the default Hadoop configuration (default) or not
+    conf_location: "extra-cfg.xml"  \# optional - Hadoop configuration XML to be loaded (use commas for multi values)
+    conf. : ""          \# optional - 'inlined' key=value added to the Hadoop configuration
+    concurrent_streams: 5           \# optional - the number of concurrent streams (defaults to 5)
+    compress: "false"               \# optional - whether to compress the metadata or not (default)
+    chunk_size: "10mb"              \# optional - chunk size (disabled by default)
+    
 ----
 
 NOTE: Be careful when including a paths within the `uri` setting; Some implementations ignore them completely while
 others consider them. In general, we recommend keeping the `uri` to a minimum and using the `path` element instead.
 
-===== Plugging other file-systems
+[[repository-hdfs-other-fs]]
+==== Plugging other file-systems
 
 Any HDFS-compatible file-systems (like Amazon `s3://` or Google `gs://`) can be used as long as the proper Hadoop
 configuration is passed to the Elasticsearch plugin. In practice, this means making sure the correct Hadoop configuration

From 4f445688dd59c01f61cfca64e3094e7bbd097cb4 Mon Sep 17 00:00:00 2001
From: Simon Willnauer 
Date: Tue, 15 Dec 2015 16:20:58 +0100
Subject: [PATCH 070/167] apply feedback from @bleskes

---
 .../ClusterRebalanceAllocationDecider.java    |  4 +--
 .../ConcurrentRebalanceAllocationDecider.java |  4 +--
 .../decider/DiskThresholdDecider.java         |  2 +-
 .../decider/ThrottlingAllocationDecider.java  |  5 +--
 .../service/InternalClusterService.java       |  4 +--
 .../settings/AbstractScopedSettings.java      |  1 +
 .../common/settings/ClusterSettings.java      |  5 ++-
 .../common/settings/Setting.java              | 36 +++++++++++++------
 .../HierarchyCircuitBreakerService.java       | 15 ++++----
 .../java/org/elasticsearch/node/Node.java     |  2 +-
 .../elasticsearch/threadpool/ThreadPool.java  |  2 +-
 .../common/settings/SettingTests.java         |  2 +-
 .../UpdateThreadPoolSettingsTests.java        | 12 +++----
 .../NettySizeHeaderFrameDecoderTests.java     |  2 +-
 14 files changed, 56 insertions(+), 40 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
index 0e5e744d274..b1be2a6fce4 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
@@ -79,7 +79,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
         }
     }
 
-    private ClusterRebalanceType type;
+    private volatile ClusterRebalanceType type;
 
     @Inject
     public ClusterRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
@@ -95,7 +95,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
         clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType);
     }
 
-    public void setType(ClusterRebalanceType type) {
+    private void setType(ClusterRebalanceType type) {
         this.type = type;
     }
 
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
index 21023400e32..504ea5da215 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
@@ -42,7 +42,7 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
 
     public static final String NAME = "concurrent_rebalance";
 
-    public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, true, Setting.Scope.CLUSTER);
+    public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, 0, true, Setting.Scope.CLUSTER);
     private volatile int clusterConcurrentRebalance;
 
     @Inject
@@ -53,7 +53,7 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
         clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, this::setClusterConcurrentRebalance);
     }
 
-    public void setClusterConcurrentRebalance(int concurrentRebalance) {
+    private void setClusterConcurrentRebalance(int concurrentRebalance) {
         clusterConcurrentRebalance = concurrentRebalance;
     }
 
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
index 49cf5abadd4..400ed70f808 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -550,7 +550,7 @@ public class DiskThresholdDecider extends AllocationDecider {
 
     /**
      * Checks if a watermark string is a valid percentage or byte size value,
-     * returning true if valid, false if invalid.
+     * @return the watermark value given
      */
     public static String validWatermarkSetting(String watermark, String settingName) {
         try {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
index 9e3d96b4e18..b97e6138674 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.cluster.routing.allocation.decider;
 
+import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@@ -52,8 +53,8 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
     public static final String NAME = "throttling";
     public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries";
 
-    public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, true, Setting.Scope.CLUSTER);
-    public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", (s) -> s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES,Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), Integer::parseInt, true, Setting.Scope.CLUSTER);
+    public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, true, Setting.Scope.CLUSTER);
+    public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", (s) -> s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES,Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER);
 
     private volatile int primariesInitialRecoveries;
     private volatile int concurrentRecoveries;
diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java
index 09f15994848..402df7251db 100644
--- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java
@@ -64,7 +64,7 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
 public class InternalClusterService extends AbstractLifecycleComponent implements ClusterService {
 
     public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
-    public static final String SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL = "cluster.service.reconnect_interval";
+    public static final Setting CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval",  TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);
 
     public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
     private final ThreadPool threadPool;
@@ -123,7 +123,7 @@ public class InternalClusterService extends AbstractLifecycleComponent extends ToXContentToBytes {
         try {
             return parser.apply(value);
         } catch (ElasticsearchParseException ex) {
-            throw ex;
+            throw new IllegalArgumentException(ex.getMessage(), ex);
         } catch (Exception t) {
             throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", t);
         }
@@ -146,6 +144,7 @@ public class Setting extends ToXContentToBytes {
         builder.field("key", key);
         builder.field("type", scope.name());
         builder.field("dynamic", dynamic);
+        builder.field("is_group_setting", isGroupSetting());
         builder.field("default", defaultValue.apply(Settings.EMPTY));
         builder.endObject();
         return builder;
@@ -163,9 +162,9 @@ public class Setting extends ToXContentToBytes {
         return newUpdater(consumer, logger, (s) -> {});
     }
 
-    AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Consumer accept) {
+    AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Consumer validator) {
         if (isDynamic()) {
-            return new Updater(consumer, logger, accept);
+            return new Updater(consumer, logger, validator);
         } else {
             throw new IllegalStateException("setting [" + getKey() + "] is not dynamic");
         }
@@ -222,6 +221,9 @@ public class Setting extends ToXContentToBytes {
         public boolean hasChanged(Settings current, Settings previous) {
             final String newValue = getRaw(current);
             final String value = getRaw(previous);
+            assert isGroupSetting() == false : "group settings must override this method";
+            assert value != null : "value was null but can't be unless default is null which is invalid";
+
             return value.equals(newValue) == false;
         }
 
@@ -258,14 +260,26 @@ public class Setting extends ToXContentToBytes {
         return new Setting<>(key, (s) -> Float.toString(defaultValue), (s) -> {
             float value = Float.parseFloat(s);
             if (value < minValue) {
-                throw new ElasticsearchParseException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
+                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
             }
             return value;
         }, dynamic, scope);
     }
 
+    public static Setting intSetting(String key, int defaultValue, int minValue, boolean dynamic, Scope scope) {
+        return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope);
+    }
+
+    public static int parseInt(String s, int minValue, String key) {
+        int value = Integer.parseInt(s);
+        if (value < minValue) {
+            throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
+        }
+        return value;
+    }
+
     public static Setting intSetting(String key, int defaultValue, boolean dynamic, Scope scope) {
-        return new Setting<>(key, (s) -> Integer.toString(defaultValue), Integer::parseInt, dynamic, scope);
+        return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope);
     }
 
     public static Setting boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) {
@@ -306,7 +320,7 @@ public class Setting extends ToXContentToBytes {
             }
 
             @Override
-            public AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Consumer accept) {
+            public AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Consumer validator) {
                 if (isDynamic() == false) {
                     throw new IllegalStateException("setting [" + getKey() + "] is not dynamic");
                 }
@@ -325,7 +339,7 @@ public class Setting extends ToXContentToBytes {
                         Settings currentSettings = get(current);
                         Settings previousSettings = get(previous);
                         try {
-                            accept.accept(currentSettings);
+                            validator.accept(currentSettings);
                         } catch (Exception | AssertionError e) {
                             throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + previousSettings.getAsMap() + "] to [" + currentSettings.getAsMap() + "]", e);
                         }
@@ -350,7 +364,7 @@ public class Setting extends ToXContentToBytes {
         return new Setting<>(key, defaultValue, (s) -> {
             TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
             if (timeValue.millis() < minValue.millis()) {
-                throw new ElasticsearchParseException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
+                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
             }
             return timeValue;
         }, dynamic, scope);
@@ -368,7 +382,7 @@ public class Setting extends ToXContentToBytes {
         return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> {
             final double d = Double.parseDouble(s);
             if (d < minValue) {
-                throw new ElasticsearchParseException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
+                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
             }
             return d;
         }, dynamic, scope);
diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
index 4c2a2ced09e..0e1532bc6b3 100644
--- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
+++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
@@ -50,13 +50,13 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
 
     public static final Setting FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", true, Setting.Scope.CLUSTER);
     public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, true, Setting.Scope.CLUSTER);
-    public static final String FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.fielddata.type";
+    public static final Setting FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING =  new Setting<>("indices.breaker.fielddata.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER);
 
     public static final Setting REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.request.limit", "40%", true, Setting.Scope.CLUSTER);
     public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, true, Setting.Scope.CLUSTER);
-    public static final String REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.request.type";
+    public static final Setting REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.request.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER);
+
 
-    public static final String DEFAULT_BREAKER_TYPE = "memory";
 
     private volatile BreakerSettings parentSettings;
     private volatile BreakerSettings fielddataSettings;
@@ -71,13 +71,13 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
         this.fielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA,
                 FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(),
                 FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings),
-                CircuitBreaker.Type.parseValue(settings.get(FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE))
+                FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.get(settings)
         );
 
         this.requestSettings = new BreakerSettings(CircuitBreaker.REQUEST,
                 REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(),
                 REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings),
-                CircuitBreaker.Type.parseValue(settings.get(REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE))
+                REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.get(settings)
         );
 
         this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT, TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), 1.0, CircuitBreaker.Type.PARENT);
@@ -93,10 +93,7 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
         clusterSettings.addSettingsUpdateConsumer(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, this::setRequestBreakerLimit);
     }
     private void setRequestBreakerLimit(ByteSizeValue newRequestMax, Double newRequestOverhead) {
-        long newRequestLimitBytes = newRequestMax == null ? HierarchyCircuitBreakerService.this.requestSettings.getLimit() : newRequestMax.bytes();
-        newRequestOverhead = newRequestOverhead == null ? HierarchyCircuitBreakerService.this.requestSettings.getOverhead() : newRequestOverhead;
-
-        BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestLimitBytes, newRequestOverhead,
+        BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestMax.bytes(), newRequestOverhead,
                 HierarchyCircuitBreakerService.this.requestSettings.getType());
         registerBreaker(newRequestSettings);
         HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings;
diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java
index 3460539f5b8..a28e532d399 100644
--- a/core/src/main/java/org/elasticsearch/node/Node.java
+++ b/core/src/main/java/org/elasticsearch/node/Node.java
@@ -202,7 +202,7 @@ public class Node implements Releasable {
             injector = modules.createInjector();
 
             client = injector.getInstance(Client.class);
-            threadPool.setNodeSettingsService(injector.getInstance(ClusterSettings.class));
+            threadPool.setClusterSettings(injector.getInstance(ClusterSettings.class));
             success = true;
         } catch (IOException ex) {
             throw new ElasticsearchException("failed to bind service", ex);
diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
index 935fa2231cb..56e02926ed6 100644
--- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
+++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
@@ -250,7 +250,7 @@ public class ThreadPool extends AbstractComponent {
         this.estimatedTimeThread.start();
     }
 
-    public void setNodeSettingsService(ClusterSettings clusterSettings) {
+    public void setClusterSettings(ClusterSettings clusterSettings) {
         if(settingsListenerIsSet.compareAndSet(false, true)) {
             clusterSettings.addSettingsUpdateConsumer(THREADPOOL_GROUP_SETTING, this::updateSettings, (s) -> validate(s.getAsGroups()));
         } else {
diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java
index d8ac616eccb..1640cfdd3b5 100644
--- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java
+++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java
@@ -47,7 +47,7 @@ public class SettingTests extends ESTestCase {
         try {
             settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY);
             fail("no unit");
-        } catch (ElasticsearchParseException ex) {
+        } catch (IllegalArgumentException ex) {
             assertEquals("failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized", ex.getMessage());
         }
 
diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java
index 07f34071442..56b2a03bad1 100644
--- a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java
+++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java
@@ -92,7 +92,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
         try {
             threadPool = new ThreadPool(settingsBuilder().put("name", "testUpdateSettingsCanNotChangeThreadPoolType").build());
             ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-            threadPool.setNodeSettingsService(clusterSettings);
+            threadPool.setClusterSettings(clusterSettings);
 
             clusterSettings.applySettings(
                     settingsBuilder()
@@ -118,7 +118,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
                     .put("name", "testCachedExecutorType").build();
             threadPool = new ThreadPool(nodeSettings);
             ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-            threadPool.setNodeSettingsService(clusterSettings);
+            threadPool.setClusterSettings(clusterSettings);
 
             assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED);
             assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
@@ -169,7 +169,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
                     .put("name", "testFixedExecutorType").build();
             threadPool = new ThreadPool(nodeSettings);
             ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-            threadPool.setNodeSettingsService(clusterSettings);
+            threadPool.setClusterSettings(clusterSettings);
             assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
             Settings settings = clusterSettings.applySettings(settingsBuilder()
                     .put("threadpool." + threadPoolName + ".size", "15")
@@ -224,7 +224,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
                     .put("name", "testScalingExecutorType").build();
             threadPool = new ThreadPool(nodeSettings);
             ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-            threadPool.setNodeSettingsService(clusterSettings);
+            threadPool.setClusterSettings(clusterSettings);
             assertThat(info(threadPool, threadPoolName).getMin(), equalTo(1));
             assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10));
             assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(5L));
@@ -262,7 +262,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
                     .put("name", "testCachedExecutorType").build();
             threadPool = new ThreadPool(nodeSettings);
             ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-            threadPool.setNodeSettingsService(clusterSettings);
+            threadPool.setClusterSettings(clusterSettings);
             assertEquals(info(threadPool, threadPoolName).getQueueSize().getSingles(), 1000L);
 
             final CountDownLatch latch = new CountDownLatch(1);
@@ -299,7 +299,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase {
                     .put("name", "testCustomThreadPool").build();
             threadPool = new ThreadPool(nodeSettings);
             ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-            threadPool.setNodeSettingsService(clusterSettings);
+            threadPool.setClusterSettings(clusterSettings);
             ThreadPoolInfo groups = threadPool.info();
             boolean foundPool1 = false;
             boolean foundPool2 = false;
diff --git a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java
index 02fffda722a..7a3fd88f93b 100644
--- a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java
+++ b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java
@@ -64,7 +64,7 @@ public class NettySizeHeaderFrameDecoderTests extends ESTestCase {
     @Before
     public void startThreadPool() {
         threadPool = new ThreadPool(settings);
-        threadPool.setNodeSettingsService(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
+        threadPool.setClusterSettings(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
         NetworkService networkService = new NetworkService(settings);
         BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService());
         nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, Version.CURRENT, new NamedWriteableRegistry());

From dbd7b5abbc2584811d5cc011b1f7052c86ba7f5b Mon Sep 17 00:00:00 2001
From: Simon Willnauer 
Date: Tue, 15 Dec 2015 16:39:48 +0100
Subject: [PATCH 071/167] Fix several tests

---
 .../org/elasticsearch/common/settings/ClusterSettings.java    | 4 ----
 .../main/java/org/elasticsearch/common/settings/Setting.java  | 2 ++
 .../org/elasticsearch/cluster/settings/ClusterSettingsIT.java | 4 ++--
 3 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index 89cdd045170..4e922ca28e3 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -96,10 +96,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
         IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
         MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
         MetaData.SETTING_READ_ONLY_SETTING,
-        RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING,
-        RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS_SETTING,
-        RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE_SETTING,
-        RecoverySettings.INDICES_RECOVERY_COMPRESS_SETTING,
         RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING,
         RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING,
         RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
index b15634b35e3..ba9573e0bbf 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -116,6 +116,8 @@ public class Setting extends ToXContentToBytes {
             return parser.apply(value);
         } catch (ElasticsearchParseException ex) {
             throw new IllegalArgumentException(ex.getMessage(), ex);
+        } catch (IllegalArgumentException ex) {
+            throw ex;
         } catch (Exception t) {
             throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", t);
         }
diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
index 3a028fe54b0..5dccbb546ce 100644
--- a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
@@ -256,7 +256,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
                     .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "whatever").build())
                     .get();
             fail("bogus value");
-        } catch (ElasticsearchParseException ex) {
+        } catch (IllegalArgumentException ex) {
             assertEquals(ex.getMessage(), "Failed to parse setting [discovery.zen.commit_timeout] with value [whatever] as a time value: unit is missing or unrecognized");
         }
 
@@ -268,7 +268,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
                     .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), -1).build())
                     .get();
             fail("bogus value");
-        } catch (ElasticsearchParseException ex) {
+        } catch (IllegalArgumentException ex) {
             assertEquals(ex.getMessage(), "Failed to parse value [-1] for setting [discovery.zen.publish_timeout] must be >= 0s");
         }
 

From 91b55ded6ace919af6f372fa93d5930d7a14b58b Mon Sep 17 00:00:00 2001
From: Lee Hinman 
Date: Tue, 15 Dec 2015 08:48:43 -0700
Subject: [PATCH 072/167] Trace log REST test headers

---
 .../elasticsearch/test/rest/client/http/HttpRequestBuilder.java  | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java b/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
index 34665efa0f1..f6ce416dbff 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
+++ b/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
@@ -168,6 +168,7 @@ public class HttpRequestBuilder {
             logger.trace("sending request \n{}", stringBuilder.toString());
         }
         for (Map.Entry entry : this.headers.entrySet()) {
+            logger.trace("adding header [{} => {}]", entry.getKey(), entry.getValue());
             httpUriRequest.addHeader(entry.getKey(), entry.getValue());
         }
         try (CloseableHttpResponse closeableHttpResponse = httpClient.execute(httpUriRequest)) {

From 6a7fc8fb321de87dbf0efda8ba5d50c8ad206471 Mon Sep 17 00:00:00 2001
From: Boaz Leskes 
Date: Tue, 15 Dec 2015 17:10:32 +0100
Subject: [PATCH 073/167] IndexService: format and remove unneeded import

---
 .../org/elasticsearch/index/IndexService.java | 34 ++++++++++++-------
 1 file changed, 22 insertions(+), 12 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java
index 829d85f3a96..a6b66742c55 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexService.java
@@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -48,7 +47,13 @@ import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.QueryShardContext;
-import org.elasticsearch.index.shard.*;
+import org.elasticsearch.index.shard.IndexEventListener;
+import org.elasticsearch.index.shard.IndexSearcherWrapper;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShadowIndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardNotFoundException;
+import org.elasticsearch.index.shard.ShardPath;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.index.store.IndexStore;
 import org.elasticsearch.index.store.Store;
@@ -73,7 +78,7 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
 /**
  *
  */
-public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable{
+public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable {
 
     private final IndexEventListener eventListener;
     private final AnalysisService analysisService;
@@ -145,7 +150,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
      */
     @Nullable
     public IndexShard getShardOrNull(int shardId) {
-         return shards.get(shardId);
+        return shards.get(shardId);
     }
 
     /**
@@ -159,13 +164,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         return indexShard;
     }
 
-    public Set shardIds() { return shards.keySet(); }
+    public Set shardIds() {
+        return shards.keySet();
+    }
 
     public IndexCache cache() {
         return indexCache;
     }
 
-    public IndexFieldDataService fieldData() { return indexFieldData; }
+    public IndexFieldDataService fieldData() {
+        return indexFieldData;
+    }
 
     public AnalysisService analysisService() {
         return this.analysisService;
@@ -206,7 +215,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
     private long getAvgShardSizeInBytes() throws IOException {
         long sum = 0;
         int count = 0;
-        for(IndexShard indexShard : this) {
+        for (IndexShard indexShard : this) {
             sum += indexShard.store().stats().sizeInBytes();
             count++;
         }
@@ -253,17 +262,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
                 // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard
                 // that's being relocated/replicated we know how large it will become once it's done copying:
                 // Count up how many shards are currently on each data path:
-                Map dataPathToShardCount = new HashMap<>();
-                for(IndexShard shard : this) {
+                Map dataPathToShardCount = new HashMap<>();
+                for (IndexShard shard : this) {
                     Path dataPath = shard.shardPath().getRootStatePath();
                     Integer curCount = dataPathToShardCount.get(dataPath);
                     if (curCount == null) {
                         curCount = 0;
                     }
-                    dataPathToShardCount.put(dataPath, curCount+1);
+                    dataPathToShardCount.put(dataPath, curCount + 1);
                 }
                 path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(),
-                                                       dataPathToShardCount);
+                    dataPathToShardCount);
                 logger.debug("{} creating using a new path [{}]", shardId, path);
             } else {
                 logger.debug("{} creating using an existing path [{}]", shardId, path);
@@ -276,7 +285,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
             logger.debug("creating shard_id {}", shardId);
             // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
             final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
-                    (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
+                (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
             store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId)));
             if (useShadowEngine(primary, indexSettings)) {
                 indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider);
@@ -461,6 +470,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
             }
         }
     }
+
     /**
      * Returns the filter associated with listed filtering aliases.
      * 

From 82a64fd2f8c7e1ddc1b1643c826b323a38316f6d Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 15 Dec 2015 17:21:31 +0100 Subject: [PATCH 074/167] Fix SearchWhileRelocatingIT so that the type of the mapping matches the type of documents. --- .../main/java/org/elasticsearch/index/mapper/MapperService.java | 2 +- .../org/elasticsearch/search/basic/SearchWhileRelocatingIT.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 86610283d75..de35b4712ea 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -338,7 +338,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { for (FieldMapper fieldMapper : fieldMappers) { if (fullPathObjectMappers.containsKey(fieldMapper.name())) { - throw new IllegalArgumentException("Field [{}] is defined as a field in mapping [" + fieldMapper.name() + "] but this name is already used for an object in other types"); + throw new IllegalArgumentException("Field [" + fieldMapper.name() + "] is defined as a field in mapping [" + type + "] but this name is already used for an object in other types"); } } diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java index 69c4bbdbd11..1f421292371 100644 --- a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java +++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java @@ -61,7 +61,7 @@ public class SearchWhileRelocatingIT extends ESIntegTestCase { final int numShards = between(1, 20); client().admin().indices().prepareCreate("test") .setSettings(settingsBuilder().put("index.number_of_shards", 
numShards).put("index.number_of_replicas", numberOfReplicas)) - .addMapping("type1", "loc", "type=geo_point", "test", "type=string").execute().actionGet(); + .addMapping("type", "loc", "type=geo_point", "test", "type=string").execute().actionGet(); ensureGreen(); List indexBuilders = new ArrayList<>(); final int numDocs = between(10, 20); From 015ead0c458e9fe455c148cef068090f840b8c9b Mon Sep 17 00:00:00 2001 From: soinlv Date: Tue, 15 Dec 2015 16:00:55 -0500 Subject: [PATCH 075/167] Remove unused import Closes #15458 --- .../java/org/elasticsearch/index/store/IndexStoreConfig.java | 1 - 1 file changed, 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java index 1bd023abdb0..6ce709c4f62 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java @@ -22,7 +22,6 @@ import org.apache.lucene.store.StoreRateLimiting; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.node.settings.NodeSettingsService; From d24c83b6907c30f2bf79f153b3e4aa8c5819d654 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 15 Dec 2015 17:38:30 -0500 Subject: [PATCH 076/167] Cleanup o/e/c/r/RoutingNodes.java This commit applies a few trivial cleanups to o/e/c/r/RoutingNodes.java. 
--- .../cluster/routing/RoutingNodes.java | 34 ++++++++----------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 5d17a59339a..8dd980c8bb3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -31,7 +30,14 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.index.shard.ShardId; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.function.Predicate; /** @@ -78,7 +84,7 @@ public class RoutingNodes implements Iterable { Map> nodesToShards = new HashMap<>(); // fill in the nodeToShards with the "live" nodes for (ObjectCursor cursor : clusterState.nodes().dataNodes().values()) { - nodesToShards.put(cursor.value.id(), new ArrayList()); + nodesToShards.put(cursor.value.id(), new ArrayList<>()); } // fill in the inverse of node -> shards allocated @@ -91,21 +97,13 @@ public class RoutingNodes implements Iterable { // by the ShardId, as this is common for primary and replicas. // A replica Set might have one (and not more) replicas with the state of RELOCATING. 
if (shard.assignedToNode()) { - List entries = nodesToShards.get(shard.currentNodeId()); - if (entries == null) { - entries = new ArrayList<>(); - nodesToShards.put(shard.currentNodeId(), entries); - } + List entries = nodesToShards.computeIfAbsent(shard.currentNodeId(), k -> new ArrayList<>()); final ShardRouting sr = getRouting(shard, readOnly); entries.add(sr); assignedShardsAdd(sr); if (shard.relocating()) { - entries = nodesToShards.get(shard.relocatingNodeId()); relocatingShards++; - if (entries == null) { - entries = new ArrayList<>(); - nodesToShards.put(shard.relocatingNodeId(), entries); - } + entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(), k -> new ArrayList<>()); // add the counterpart shard with relocatingNodeId reflecting the source from which // it's relocating from. ShardRouting targetShardRouting = shard.buildTargetRelocatingShard(); @@ -121,7 +119,7 @@ public class RoutingNodes implements Iterable { inactiveShardCount++; } } else { - final ShardRouting sr = getRouting(shard, readOnly); + final ShardRouting sr = getRouting(shard, readOnly); assignedShardsAdd(sr); unassignedShards.add(sr); } @@ -449,12 +447,8 @@ public class RoutingNodes implements Iterable { // no unassigned return; } - List shards = assignedShards.get(shard.shardId()); - if (shards == null) { - shards = new ArrayList<>(); - assignedShards.put(shard.shardId(), shards); - } - assert assertInstanceNotInList(shard, shards); + List shards = assignedShards.computeIfAbsent(shard.shardId(), k -> new ArrayList<>()); + assert assertInstanceNotInList(shard, shards); shards.add(shard); } From c8d199bc83623a1fc232799b0faa8630df4769f4 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 15 Dec 2015 18:51:38 -0800 Subject: [PATCH 077/167] Organize and annotate gitignores --- .gitignore | 47 ++++++++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/.gitignore b/.gitignore index 31f2aa5fc66..b7d5d95a256 100644 --- a/.gitignore 
+++ b/.gitignore @@ -1,37 +1,42 @@ + +# intellij files .idea/ -.gradle/ *.iml *.ipr *.iws -work/ -/data/ -logs/ -.DS_Store -build/ -generated-resources/ -**/.local* -docs/html/ -docs/build.log -/tmp/ -backwards/ -html_docs -.vagrant/ -## eclipse ignores (use 'mvn eclipse:eclipse' to build eclipse projects) -## All files (.project, .classpath, .settings/*) should be generated through Maven which -## will correctly set the classpath based on the declared dependencies and write settings -## files to ensure common coding style across Eclipse and IDEA. +# eclipse files .project .classpath eclipse-build .settings -## netbeans ignores +# netbeans files nb-configuration.xml nbactions.xml -dependency-reduced-pom.xml +# gradle stuff +.gradle/ +build/ +generated-resources/ -# old patterns specific to maven +# maven stuff (to be removed when trunk becomes 4.x) *-execution-hints.log target/ +dependency-reduced-pom.xml + +# testing stuff +**/.local* +.vagrant/ + +# osx stuff +.DS_Store + +# random old stuff that we should look at the necessity of... +docs/html/ +docs/build.log +html_docs +/tmp/ +backwards/ + + From 36bd8450900072ef260c19a8293406cfe0f7ccab Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 15 Dec 2015 15:10:22 -0500 Subject: [PATCH 078/167] Reorganize o/e/c/a/s/ShardStateAction.java This commit is a trivial reorganization of o/e/c/a/s/ShardStateAction.java. The primary motive is have all of the shard failure handling grouped together, and all of the shard started handling grouped together. 
--- .../action/shard/ShardStateAction.java | 168 +++++++++--------- 1 file changed, 88 insertions(+), 80 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index d09df094a68..c2ac791aa16 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -20,7 +20,11 @@ package org.elasticsearch.cluster.action.shard; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingService; @@ -37,7 +41,14 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.EmptyTransportResponseHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; @@ -45,11 +56,8 @@ import java.util.List; import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry; 
-/** - * - */ -public class ShardStateAction extends AbstractComponent { +public class ShardStateAction extends AbstractComponent { public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started"; public static final String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure"; @@ -97,52 +105,26 @@ public class ShardStateAction extends AbstractComponent { options = TransportRequestOptions.builder().withTimeout(timeout).build(); } transportService.sendRequest(masterNode, - SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleResponse(TransportResponse.Empty response) { - listener.onSuccess(); - } + SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleResponse(TransportResponse.Empty response) { + listener.onSuccess(); + } - @Override - public void handleException(TransportException exp) { - logger.warn("failed to send failed shard to {}", exp, masterNode); - listener.onShardFailedFailure(masterNode, exp); - } - }); + @Override + public void handleException(TransportException exp) { + logger.warn("failed to send failed shard to {}", exp, masterNode); + listener.onShardFailedFailure(masterNode, exp); + } + }); } - public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) { - DiscoveryNode masterNode = clusterService.state().nodes().masterNode(); - if (masterNode == null) { - logger.warn("{} can't send shard started for {}, no master known.", shardRouting.shardId(), shardRouting); - return; + private class ShardFailedTransportHandler implements TransportRequestHandler { + @Override + public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { + handleShardFailureOnMaster(request); + channel.sendResponse(TransportResponse.Empty.INSTANCE); } - shardStarted(shardRouting, indexUUID, 
reason, masterNode); - } - - public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) { - ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null); - logger.debug("{} sending shard started for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); - transportService.sendRequest(masterNode, - SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleException(TransportException exp) { - logger.warn("failed to send shard started to [{}]", exp, masterNode); - } - - }); - } - - private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler(); - - private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) { - logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); - clusterService.submitStateUpdateTask( - "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", - shardRoutingEntry, - ClusterStateTaskConfig.build(Priority.HIGH), - shardFailedClusterStateHandler, - shardFailedClusterStateHandler); } class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor, ClusterStateTaskListener { @@ -168,10 +150,10 @@ public class ShardStateAction extends AbstractComponent { @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) { - logger.trace("unassigned shards after shard failures. 
scheduling a reroute."); - routingService.reroute("unassigned shards after shard failures, scheduling a reroute"); - } + if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) { + logger.trace("unassigned shards after shard failures. scheduling a reroute."); + routingService.reroute("unassigned shards after shard failures, scheduling a reroute"); + } } @Override @@ -180,18 +162,45 @@ public class ShardStateAction extends AbstractComponent { } } - private final ShardStartedClusterStateHandler shardStartedClusterStateHandler = - new ShardStartedClusterStateHandler(); - - private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) { - logger.debug("received shard started for {}", shardRoutingEntry); + private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler(); + private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) { + logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); clusterService.submitStateUpdateTask( - "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", - shardRoutingEntry, - ClusterStateTaskConfig.build(Priority.URGENT), - shardStartedClusterStateHandler, - shardStartedClusterStateHandler); + "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", + shardRoutingEntry, + ClusterStateTaskConfig.build(Priority.HIGH), + shardFailedClusterStateHandler, + shardFailedClusterStateHandler); + } + + public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) { + DiscoveryNode masterNode = clusterService.state().nodes().masterNode(); + if (masterNode == null) { + logger.warn("{} can't send shard started for {}, no master known.", shardRouting.shardId(), shardRouting); + return; + } + shardStarted(shardRouting, indexUUID, reason, 
masterNode); + } + + public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) { + ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null); + logger.debug("{} sending shard started for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); + transportService.sendRequest(masterNode, + SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleException(TransportException exp) { + logger.warn("failed to send shard started to [{}]", exp, masterNode); + } + }); + } + + class ShardStartedTransportHandler implements TransportRequestHandler { + @Override + public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { + handleShardStartedOnMaster(request); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } } class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor, ClusterStateTaskListener { @@ -223,26 +232,20 @@ public class ShardStateAction extends AbstractComponent { } } - private class ShardFailedTransportHandler implements TransportRequestHandler { + private final ShardStartedClusterStateHandler shardStartedClusterStateHandler = new ShardStartedClusterStateHandler(); - @Override - public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { - handleShardFailureOnMaster(request); - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } + private void handleShardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) { + logger.debug("received shard started for {}", shardRoutingEntry); - class ShardStartedTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { - shardStartedOnMaster(request); - 
channel.sendResponse(TransportResponse.Empty.INSTANCE); - } + clusterService.submitStateUpdateTask( + "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", + shardRoutingEntry, + ClusterStateTaskConfig.build(Priority.URGENT), + shardStartedClusterStateHandler, + shardStartedClusterStateHandler); } public static class ShardRoutingEntry extends TransportRequest { - ShardRouting shardRouting; String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE; String message; @@ -283,8 +286,13 @@ public class ShardStateAction extends AbstractComponent { } public interface Listener { - default void onSuccess() {} - default void onShardFailedNoMaster() {} - default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {} + default void onSuccess() { + } + + default void onShardFailedNoMaster() { + } + + default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) { + } } } From 082632dcacc3baa18971af23dcb91d310eba92c7 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 15 Dec 2015 21:20:56 +0100 Subject: [PATCH 079/167] aggs: fixed bug in children agg that prevented all child docs from being evaluated Before we only evaluated segments that yielded matches in parent aggs, which caused us to miss to evaluate child docs in segments we didn't have parent matches for. The fix for this is stop remember in what segments we have matches for and simply evaluate all segments. This makes the code simpler and we can still quickly see if a segment doesn't hold child docs like we did before. 
--- .../children/ParentToChildrenAggregator.java | 16 +---- .../aggregations/bucket/ChildrenIT.java | 62 +++++++++++++++++++ 2 files changed, 65 insertions(+), 13 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java index 6d9a1edc712..0678338fcf7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.children; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.search.*; @@ -64,9 +65,6 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { private final LongObjectPagedHashMap parentOrdToOtherBuckets; private boolean multipleBucketsPerParentOrd = false; - // This needs to be a Set to avoid duplicate reader context entries via (#setNextReader(...), it can get invoked multiple times with the same reader context) - private Set replay = new LinkedHashSet<>(); - public ParentToChildrenAggregator(String name, AggregatorFactories factories, AggregationContext aggregationContext, Aggregator parent, String parentType, Query childFilter, Query parentFilter, ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, @@ -99,17 +97,11 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } - if (replay == null) { - throw new IllegalStateException(); - } final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); assert globalOrdinals != null; Scorer parentScorer = parentFilter.scorer(ctx); final 
Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer); - if (childFilter.scorer(ctx) != null) { - replay.add(ctx); - } return new LeafBucketCollector() { @Override @@ -138,10 +130,8 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { @Override protected void doPostCollection() throws IOException { - final Set replay = this.replay; - this.replay = null; - - for (LeafReaderContext ctx : replay) { + IndexReader indexReader = context().searchContext().searcher().getIndexReader(); + for (LeafReaderContext ctx : indexReader.leaves()) { DocIdSetIterator childDocsIter = childFilter.scorer(ctx); if (childDocsIter == null) { continue; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java index b6611a956af..540420c21bc 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java @@ -18,12 +18,15 @@ */ package org.elasticsearch.search.aggregations.bucket; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.children.Children; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.sum.Sum; @@ -392,6 +395,65 @@ public class ChildrenIT extends ESIntegTestCase { assertThat(terms.getBuckets().get(0).getDocCount(), equalTo(1l)); } + 
public void testPostCollectAllLeafReaders() throws Exception { + // The 'towns' and 'parent_names' aggs operate on parent docs and if child docs are in different segments we need + // to ensure those segments which child docs are also evaluated to in the post collect phase. + + // Before we only evaluated segments that yielded matches in 'towns' and 'parent_names' aggs, which caused + // us to miss to evaluate child docs in segments we didn't have parent matches for. + + assertAcked( + prepareCreate("index") + .addMapping("parentType", "name", "type=string,index=not_analyzed", "town", "type=string,index=not_analyzed") + .addMapping("childType", "_parent", "type=parentType", "name", "type=string,index=not_analyzed", "age", "type=integer") + ); + List requests = new ArrayList<>(); + requests.add(client().prepareIndex("index", "parentType", "1").setSource("name", "Bob", "town", "Memphis")); + requests.add(client().prepareIndex("index", "parentType", "2").setSource("name", "Alice", "town", "Chicago")); + requests.add(client().prepareIndex("index", "parentType", "3").setSource("name", "Bill", "town", "Chicago")); + requests.add(client().prepareIndex("index", "childType", "1").setSource("name", "Jill", "age", 5).setParent("1")); + requests.add(client().prepareIndex("index", "childType", "2").setSource("name", "Joey", "age", 3).setParent("1")); + requests.add(client().prepareIndex("index", "childType", "3").setSource("name", "John", "age", 2).setParent("2")); + requests.add(client().prepareIndex("index", "childType", "4").setSource("name", "Betty", "age", 6).setParent("3")); + requests.add(client().prepareIndex("index", "childType", "5").setSource("name", "Dan", "age", 1).setParent("3")); + indexRandom(true, requests); + + SearchResponse response = client().prepareSearch("index") + .setSize(0) + .addAggregation(AggregationBuilders.terms("towns").field("town") + .subAggregation(AggregationBuilders.terms("parent_names").field("name") + 
.subAggregation(AggregationBuilders.children("child_docs").childType("childType")) + ) + ) + .get(); + + Terms towns = response.getAggregations().get("towns"); + assertThat(towns.getBuckets().size(), equalTo(2)); + assertThat(towns.getBuckets().get(0).getKeyAsString(), equalTo("Chicago")); + assertThat(towns.getBuckets().get(0).getDocCount(), equalTo(2L)); + + Terms parents = towns.getBuckets().get(0).getAggregations().get("parent_names"); + assertThat(parents.getBuckets().size(), equalTo(2)); + assertThat(parents.getBuckets().get(0).getKeyAsString(), equalTo("Alice")); + assertThat(parents.getBuckets().get(0).getDocCount(), equalTo(1L)); + Children children = parents.getBuckets().get(0).getAggregations().get("child_docs"); + assertThat(children.getDocCount(), equalTo(1L)); + + assertThat(parents.getBuckets().get(1).getKeyAsString(), equalTo("Bill")); + assertThat(parents.getBuckets().get(1).getDocCount(), equalTo(1L)); + children = parents.getBuckets().get(1).getAggregations().get("child_docs"); + assertThat(children.getDocCount(), equalTo(2L)); + + assertThat(towns.getBuckets().get(1).getKeyAsString(), equalTo("Memphis")); + assertThat(towns.getBuckets().get(1).getDocCount(), equalTo(1L)); + parents = towns.getBuckets().get(1).getAggregations().get("parent_names"); + assertThat(parents.getBuckets().size(), equalTo(1)); + assertThat(parents.getBuckets().get(0).getKeyAsString(), equalTo("Bob")); + assertThat(parents.getBuckets().get(0).getDocCount(), equalTo(1L)); + children = parents.getBuckets().get(0).getAggregations().get("child_docs"); + assertThat(children.getDocCount(), equalTo(2L)); + } + private static final class Control { final String category; From 34703a838d71c32e3bf944e95c09120dd443b075 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 15 Dec 2015 15:45:36 +0100 Subject: [PATCH 080/167] Beef up TranslogTests with concurrent fatal exceptions test Today we only test this when writing sequentially. 
Yet, in practice we mainly write concurrently, this commit adds a test that tests that concurrent writes with sudden fatal failure will not corrupt our translog. Relates to #15420 --- .../index/translog/Translog.java | 16 +- .../index/translog/TranslogTests.java | 187 ++++++++++++++---- 2 files changed, 163 insertions(+), 40 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 4016695dd89..6329e5cc998 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -158,7 +158,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC try { if (translogGeneration != null) { - final Checkpoint checkpoint = Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME)); + final Checkpoint checkpoint = readCheckpoint(); this.recoveredTranslogs = recoverFromFiles(translogGeneration, checkpoint); if (recoveredTranslogs.isEmpty()) { throw new IllegalStateException("at least one reader must be recovered"); @@ -545,6 +545,15 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC ensureOpen(); return current.syncUpTo(location.translogLocation + location.size); } + } catch (AlreadyClosedException | IOException ex) { + if (current.getTragicException() != null) { + try { + close(); + } catch (Exception inner) { + ex.addSuppressed(inner); + } + } + throw ex; } return false; } @@ -1433,4 +1442,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return current.getTragicException(); } + /** Reads and returns the current checkpoint */ + final Checkpoint readCheckpoint() throws IOException { + return Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME)); + } + } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java 
b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index e35c04dcd6b..3ede7355524 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.mockfile.FilterFileChannel; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.ByteArrayDataOutput; +import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; @@ -62,6 +63,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; import static org.hamcrest.Matchers.*; @@ -1242,11 +1244,11 @@ public class TranslogTests extends ESTestCase { private final CountDownLatch downLatch; private final int opsPerThread; private final int threadId; - private final BlockingQueue writtenOperations; + private final Collection writtenOperations; private final Throwable[] threadExceptions; private final Translog translog; - public TranslogThread(Translog translog, CountDownLatch downLatch, int opsPerThread, int threadId, BlockingQueue writtenOperations, Throwable[] threadExceptions) { + public TranslogThread(Translog translog, CountDownLatch downLatch, int opsPerThread, int threadId, Collection writtenOperations, Throwable[] threadExceptions) { this.translog = translog; this.downLatch = downLatch; this.opsPerThread = opsPerThread; @@ -1276,59 +1278,34 @@ public class TranslogTests extends ESTestCase { throw new ElasticsearchException("not supported op type"); } - Translog.Location loc = translog.add(op); + Translog.Location loc = add(op); writtenOperations.add(new LocationOperation(op, loc)); + afterAdd(); 
} } catch (Throwable t) { threadExceptions[threadId] = t; } } + + protected Translog.Location add(Translog.Operation op) throws IOException { + return translog.add(op); + } + + protected void afterAdd() throws IOException {} } public void testFailFlush() throws IOException { Path tempDir = createTempDir(); - final AtomicBoolean simulateDiskFull = new AtomicBoolean(); + final AtomicBoolean fail = new AtomicBoolean(); TranslogConfig config = getTranslogConfig(tempDir); - Translog translog = new Translog(config) { - @Override - TranslogWriter.ChannelFactory getChannelFactory() { - final TranslogWriter.ChannelFactory factory = super.getChannelFactory(); - - return new TranslogWriter.ChannelFactory() { - @Override - public FileChannel open(Path file) throws IOException { - FileChannel channel = factory.open(file); - return new FilterFileChannel(channel) { - - @Override - public int write(ByteBuffer src) throws IOException { - if (simulateDiskFull.get()) { - if (src.limit() > 1) { - final int pos = src.position(); - final int limit = src.limit(); - src.limit(limit / 2); - super.write(src); - src.position(pos); - src.limit(limit); - throw new IOException("__FAKE__ no space left on device"); - } - } - return super.write(src); - } - }; - } - }; - } - }; + Translog translog = getFailableTranslog(fail, config); List locations = new ArrayList<>(); int opsSynced = 0; - int opsAdded = 0; boolean failed = false; while(failed == false) { try { locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); - opsAdded++; translog.sync(); opsSynced++; } catch (IOException ex) { @@ -1336,9 +1313,9 @@ public class TranslogTests extends ESTestCase { assertFalse(translog.isOpen()); assertEquals("__FAKE__ no space left on device", ex.getMessage()); } - simulateDiskFull.set(randomBoolean()); + fail.set(randomBoolean()); } - simulateDiskFull.set(false); + fail.set(false); if (randomBoolean()) { try { 
locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); @@ -1402,4 +1379,136 @@ public class TranslogTests extends ESTestCase { } } } + + public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException, InterruptedException { + Path tempDir = createTempDir(); + final AtomicBoolean fail = new AtomicBoolean(false); + + TranslogConfig config = getTranslogConfig(tempDir); + Translog translog = getFailableTranslog(fail, config); + + final int threadCount = randomIntBetween(1, 5); + Thread[] threads = new Thread[threadCount]; + final Throwable[] threadExceptions = new Throwable[threadCount]; + final CountDownLatch downLatch = new CountDownLatch(1); + final CountDownLatch added = new CountDownLatch(randomIntBetween(10, 100)); + List writtenOperations = Collections.synchronizedList(new ArrayList<>()); + for (int i = 0; i < threadCount; i++) { + final int threadId = i; + threads[i] = new TranslogThread(translog, downLatch, 200, threadId, writtenOperations, threadExceptions) { + @Override + protected Translog.Location add(Translog.Operation op) throws IOException { + Translog.Location add = super.add(op); + added.countDown(); + return add; + } + + @Override + protected void afterAdd() throws IOException { + if (randomBoolean()) { + translog.sync(); + } + } + }; + threads[i].setDaemon(true); + threads[i].start(); + } + downLatch.countDown(); + added.await(); + try (Translog.View view = translog.newView()) { + // this holds a reference to the current tlog channel such that it's not closed + // if we hit a tragic event. this is important to ensure that asserts inside the Translog#add doesn't trip + // otherwise our assertions here are off by one sometimes. 
+ fail.set(true); + for (int i = 0; i < threadCount; i++) { + threads[i].join(); + } + Collections.sort(writtenOperations, (a, b) -> a.location.compareTo(b.location)); + assertFalse(translog.isOpen()); + final Checkpoint checkpoint = Checkpoint.read(config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME)); + Iterator iterator = writtenOperations.iterator(); + while (iterator.hasNext()) { + LocationOperation next = iterator.next(); + if (checkpoint.offset < (next.location.translogLocation + next.location.size)) { + // drop all that haven't been synced + iterator.remove(); + } + } + config.setTranslogGeneration(translog.getGeneration()); + try (Translog tlog = new Translog(config)) { + try (Translog.Snapshot snapshot = tlog.newSnapshot()) { + if (writtenOperations.size() != snapshot.estimatedTotalOperations()) { + for (int i = 0; i < threadCount; i++) { + if (threadExceptions[i] != null) + threadExceptions[i].printStackTrace(); + } + } + assertEquals(writtenOperations.size(), snapshot.estimatedTotalOperations()); + for (int i = 0; i < writtenOperations.size(); i++) { + assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, writtenOperations.get(i).location.generation); + Translog.Operation next = snapshot.next(); + assertNotNull("operation " + i + " must be non-null", next); + assertEquals(next, writtenOperations.get(i).operation); + } + } + } + } + } + + private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config) throws IOException { + return new Translog(config) { + @Override + TranslogWriter.ChannelFactory getChannelFactory() { + final TranslogWriter.ChannelFactory factory = super.getChannelFactory(); + + return new TranslogWriter.ChannelFactory() { + @Override + public FileChannel open(Path file) throws IOException { + FileChannel channel = factory.open(file); + return new ThrowingFileChannel(fail, randomBoolean(), channel); + } + }; + } + }; + } + + public 
static class ThrowingFileChannel extends FilterFileChannel { + private final AtomicBoolean fail; + private final boolean partialWrite; + + public ThrowingFileChannel(AtomicBoolean fail, boolean partialWrite, FileChannel delegate) { + super(delegate); + this.fail = fail; + this.partialWrite = partialWrite; + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public int write(ByteBuffer src, long position) throws IOException { + throw new UnsupportedOperationException(); + } + + + public int write(ByteBuffer src) throws IOException { + if (fail.get()) { + if (partialWrite) { + if (src.limit() > 1) { + final int pos = src.position(); + final int limit = src.limit(); + src.limit(limit / 2); + super.write(src); + src.position(pos); + src.limit(limit); + throw new IOException("__FAKE__ no space left on device"); + } + } + throw new MockDirectoryWrapper.FakeIOException(); + } + return super.write(src); + } + } } From d0dbfce49a6988aa91b607b5f4d529d844fe4710 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 16 Dec 2015 10:38:19 +0100 Subject: [PATCH 081/167] allow -1 for concurrent rebalance, it's a special value :( --- .../decider/ConcurrentRebalanceAllocationDecider.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index 504ea5da215..a9ad35fd526 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -42,7 +42,7 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { public static final 
String NAME = "concurrent_rebalance"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, 0, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, true, Setting.Scope.CLUSTER); private volatile int clusterConcurrentRebalance; @Inject From 7a469538bca9c7db04437ed6ad002ab02273458c Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Tue, 15 Dec 2015 18:50:04 +0100 Subject: [PATCH 082/167] serialize os name, arch and version too These three properties are build in the jason response but were not transported when a node sends the response. closes #15422 --- .../elasticsearch/monitor/os/DummyOsInfo.java | 34 +++++ .../org/elasticsearch/monitor/os/OsInfo.java | 6 + .../plugins/DummyPluginInfo.java | 28 ++++ .../nodesinfo/NodeInfoStreamingTests.java | 140 ++++++++++++++++++ 4 files changed, 208 insertions(+) create mode 100644 core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java create mode 100644 core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java create mode 100644 core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java diff --git a/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java new file mode 100644 index 00000000000..599755e78a4 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.monitor.os; + +public class DummyOsInfo extends OsInfo { + + DummyOsInfo() { + refreshInterval = 0; + availableProcessors = 0; + allocatedProcessors = 0; + name = "dummy_name"; + arch = "dummy_arch"; + version = "dummy_version"; + } + + public static final DummyOsInfo INSTANCE = new DummyOsInfo(); +} diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java index f34cd51a143..d94447221c3 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java @@ -108,6 +108,9 @@ public class OsInfo implements Streamable, ToXContent { refreshInterval = in.readLong(); availableProcessors = in.readInt(); allocatedProcessors = in.readInt(); + name = in.readOptionalString(); + arch = in.readOptionalString(); + version = in.readOptionalString(); } @Override @@ -115,5 +118,8 @@ public class OsInfo implements Streamable, ToXContent { out.writeLong(refreshInterval); out.writeInt(availableProcessors); out.writeInt(allocatedProcessors); + out.writeOptionalString(name); + out.writeOptionalString(arch); + out.writeOptionalString(version); } } diff --git a/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java new file mode 100644 index 
00000000000..a57a96c631d --- /dev/null +++ b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.plugins; + +public class DummyPluginInfo extends PluginInfo { + + private DummyPluginInfo(String name, String description, boolean site, String version, boolean jvm, String classname, boolean isolated) { + super(name, description, site, version, jvm, classname, isolated); + } + + public static final DummyPluginInfo INSTANCE = new DummyPluginInfo("dummy_plugin_name", "dummy plugin description", true, "dummy_plugin_version", true, "DummyPluginName", true); +} diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java new file mode 100644 index 00000000000..693ba4a2eba --- /dev/null +++ b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -0,0 +1,140 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nodesinfo; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.http.HttpInfo; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.monitor.os.DummyOsInfo; +import org.elasticsearch.monitor.os.OsInfo; +import org.elasticsearch.monitor.process.ProcessInfo; +import org.elasticsearch.plugins.DummyPluginInfo; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.ThreadPoolInfo; +import org.elasticsearch.transport.TransportInfo; + +import java.io.IOException; +import java.util.ArrayList; +import 
java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.core.IsEqual.equalTo; + +/** + * + */ +public class NodeInfoStreamingTests extends ESTestCase { + + public void testNodeInfoStreaming() throws IOException { + NodeInfo nodeInfo = createNodeInfo(); + Version version = Version.CURRENT; + BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(version); + nodeInfo.writeTo(out); + out.close(); + StreamInput in = StreamInput.wrap(out.bytes()); + in.setVersion(version); + NodeInfo readNodeInfo = NodeInfo.readNodeInfo(in); + assertExpectedUnchanged(nodeInfo, readNodeInfo); + + } + // checks all properties that are expected to be unchanged. Once we start changing them between versions this method has to be changed as well + private void assertExpectedUnchanged(NodeInfo nodeInfo, NodeInfo readNodeInfo) throws IOException { + assertThat(nodeInfo.getBuild().toString(), equalTo(readNodeInfo.getBuild().toString())); + assertThat(nodeInfo.getHostname(), equalTo(readNodeInfo.getHostname())); + assertThat(nodeInfo.getVersion(), equalTo(readNodeInfo.getVersion())); + assertThat(nodeInfo.getServiceAttributes().size(), equalTo(readNodeInfo.getServiceAttributes().size())); + for (Map.Entry entry : nodeInfo.getServiceAttributes().entrySet()) { + assertNotNull(readNodeInfo.getServiceAttributes().get(entry.getKey())); + assertThat(readNodeInfo.getServiceAttributes().get(entry.getKey()), equalTo(entry.getValue())); + } + compareJsonOutput(nodeInfo.getHttp(), readNodeInfo.getHttp()); + compareJsonOutput(nodeInfo.getJvm(), readNodeInfo.getJvm()); + compareJsonOutput(nodeInfo.getProcess(), readNodeInfo.getProcess()); + compareJsonOutput(nodeInfo.getSettings(), readNodeInfo.getSettings()); + compareJsonOutput(nodeInfo.getThreadPool(), readNodeInfo.getThreadPool()); + compareJsonOutput(nodeInfo.getTransport(), readNodeInfo.getTransport()); + 
compareJsonOutput(nodeInfo.getNode(), readNodeInfo.getNode()); + compareJsonOutput(nodeInfo.getOs(), readNodeInfo.getOs()); + comparePluginsAndModules(nodeInfo, readNodeInfo); + } + + private void comparePluginsAndModules(NodeInfo nodeInfo, NodeInfo readNodeInfo) throws IOException { + ToXContent.Params params = ToXContent.EMPTY_PARAMS; + XContentBuilder pluginsAndModules = jsonBuilder(); + pluginsAndModules.startObject(); + nodeInfo.getPlugins().toXContent(pluginsAndModules, params); + pluginsAndModules.endObject(); + XContentBuilder readPluginsAndModules = jsonBuilder(); + readPluginsAndModules.startObject(); + readNodeInfo.getPlugins().toXContent(readPluginsAndModules, params); + readPluginsAndModules.endObject(); + assertThat(pluginsAndModules.string(), equalTo(readPluginsAndModules.string())); + } + + private void compareJsonOutput(ToXContent param1, ToXContent param2) throws IOException { + ToXContent.Params params = ToXContent.EMPTY_PARAMS; + XContentBuilder param1Builder = jsonBuilder(); + XContentBuilder param2Builder = jsonBuilder(); + param1.toXContent(param1Builder, params); + param2.toXContent(param2Builder, params); + assertThat(param1Builder.string(), equalTo(param2Builder.string())); + } + + + private NodeInfo createNodeInfo() { + Build build = Build.CURRENT; + DiscoveryNode node = new DiscoveryNode("test_node", DummyTransportAddress.INSTANCE, VersionUtils.randomVersion(random())); + Map serviceAttributes = new HashMap<>(); + serviceAttributes.put("test", "attribute"); + Settings settings = Settings.builder().put("test", "setting").build(); + OsInfo osInfo = DummyOsInfo.INSTANCE; + ProcessInfo process = new ProcessInfo(randomInt(), randomBoolean()); + JvmInfo jvm = JvmInfo.jvmInfo(); + List threadPoolInfos = new ArrayList<>(); + threadPoolInfos.add(new ThreadPool.Info("test_threadpool", ThreadPool.ThreadPoolType.FIXED, 5)); + ThreadPoolInfo threadPoolInfo = new ThreadPoolInfo(threadPoolInfos); + Map profileAddresses = new HashMap<>(); + 
BoundTransportAddress dummyBoundTransportAddress = new BoundTransportAddress(new TransportAddress[]{DummyTransportAddress.INSTANCE}, DummyTransportAddress.INSTANCE); + profileAddresses.put("test_address", dummyBoundTransportAddress); + TransportInfo transport = new TransportInfo(dummyBoundTransportAddress, profileAddresses); + HttpInfo htttpInfo = new HttpInfo(dummyBoundTransportAddress, randomLong()); + PluginsAndModules plugins = new PluginsAndModules(); + plugins.addModule(DummyPluginInfo.INSTANCE); + plugins.addPlugin(DummyPluginInfo.INSTANCE); + return new NodeInfo(VersionUtils.randomVersion(random()), build, node, serviceAttributes, settings, osInfo, process, jvm, threadPoolInfo, transport, htttpInfo, plugins); + } +} From 20dc8556440eed7ee95f34fadf3a2f222a8b108b Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 16 Dec 2015 11:27:45 +0100 Subject: [PATCH 083/167] [TEST] Fix ScriptServiceTests.testFineGrainedSettings that can loop indefinitely --- .../test/java/org/elasticsearch/script/ScriptServiceTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 23cada02c6c..5abdc4f79bb 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -225,7 +225,7 @@ public class ScriptServiceTests extends ESTestCase { } while (scriptContextSettings.containsKey(scriptContext)); scriptContextSettings.put(scriptContext, randomFrom(ScriptMode.values())); } - int numEngineSettings = randomIntBetween(0, 10); + int numEngineSettings = randomIntBetween(0, ScriptType.values().length * scriptContexts.length); Map engineSettings = new HashMap<>(); for (int i = 0; i < numEngineSettings; i++) { String settingKey; From 9cf2f42d15675e09ed03613c17356d89502b5623 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 16 
Dec 2015 11:52:30 +0100 Subject: [PATCH 084/167] fix list/array settings --- .../org/elasticsearch/common/Booleans.java | 3 +- .../common/settings/Setting.java | 56 ++++++++++++++++++- .../common/settings/Settings.java | 2 + .../transport/TransportService.java | 23 ++++---- .../common/settings/SettingTests.java | 56 ++++++++++++++++++- 5 files changed, 122 insertions(+), 18 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/Booleans.java b/core/src/main/java/org/elasticsearch/common/Booleans.java index 6b1b9b016a7..9c5f5746633 100644 --- a/core/src/main/java/org/elasticsearch/common/Booleans.java +++ b/core/src/main/java/org/elasticsearch/common/Booleans.java @@ -84,7 +84,6 @@ public class Booleans { * throws exception if string cannot be parsed to boolean */ public static Boolean parseBooleanExact(String value) { - boolean isFalse = isExplicitFalse(value); if (isFalse) { return false; @@ -94,7 +93,7 @@ public class Booleans { return true; } - throw new IllegalArgumentException("value cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ] "); + throw new IllegalArgumentException("Failed to parse value [" + value + "] cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ]"); } public static Boolean parseBoolean(String value, Boolean defaultValue) { diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index ba9573e0bbf..758bff5c6cc 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -18,9 +18,11 @@ */ package org.elasticsearch.common.settings; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import 
org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.regex.Regex; @@ -30,6 +32,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.*; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; @@ -38,7 +42,7 @@ import java.util.function.Function; */ public class Setting extends ToXContentToBytes { private final String key; - private final Function defaultValue; + protected final Function defaultValue; private final Function parser; private final boolean dynamic; private final Scope scope; @@ -127,7 +131,7 @@ public class Setting extends ToXContentToBytes { * Returns the raw (string) settings value. If the setting is not present in the given settings object the default value is returned * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value. */ - public final String getRaw(Settings settings) { + public String getRaw(Settings settings) { return settings.get(key, defaultValue.apply(settings)); } @@ -300,6 +304,54 @@ public class Setting extends ToXContentToBytes { return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), dynamic, scope); } + public static Setting> listSetting(String key, List defaultStringValue, Function singleValueParser, boolean dynamic, Scope scope) { + Function> parser = (s) -> { + try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(s)){ + XContentParser.Token token = xContentParser.nextToken(); + if (token != XContentParser.Token.START_ARRAY) { + throw new IllegalArgumentException("expected START_ARRAY but got " + token); + } + ArrayList list = new ArrayList<>(); + while ((token = xContentParser.nextToken()) !=XContentParser.Token.END_ARRAY) { + if (token != XContentParser.Token.VALUE_STRING) { + throw new IllegalArgumentException("expected VALUE_STRING but got " + 
token); + } + list.add(singleValueParser.apply(xContentParser.text())); + } + return list; + } catch (IOException e) { + throw new IllegalArgumentException("failed to parse array", e); + } + }; + return new Setting>(key, arrayToParsableString(defaultStringValue.toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) { + + @Override + public String getRaw(Settings settings) { + String[] array = settings.getAsArray(key, null); + + return array == null ? defaultValue.apply(settings) : arrayToParsableString(array); + } + + + }; + } + + private static String arrayToParsableString(String[] array) { + try { + XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + builder.startArray(); + for (String element : array) { + builder.value(element); + } + builder.endArray(); + return builder.string(); + } catch (IOException ex) { + throw new ElasticsearchException(ex); + } + } + + + public static Setting groupSetting(String key, boolean dynamic, Scope scope) { if (key.endsWith(".") == false) { throw new IllegalArgumentException("key must end with a '.'"); diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index 05f3cb1ff0b..f8dd5d4f1f6 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -597,6 +597,8 @@ public final class Settings implements ToXContent { return result.toArray(new String[result.size()]); } + + /** * Returns group settings for the given setting prefix. 
*/ diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 05d5242ac82..916c4863e07 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -42,10 +42,7 @@ import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; @@ -87,8 +84,8 @@ public class TransportService extends AbstractLifecycleComponent TRACE_LOG_INCLUDE_SETTING = new Setting<>("transport.tracer.include", "", Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER); - public static final Setting TRACE_LOG_EXCLUDE_SETTING = new Setting<>("transport.tracer.exclude", "internal:discovery/zen/fd*," + TransportLivenessAction.NAME, Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER);; + public static final Setting> TRACE_LOG_INCLUDE_SETTING = Setting.listSetting("transport.tracer.include", Collections.emptyList(), (s) -> s, true, Setting.Scope.CLUSTER); + public static final Setting> TRACE_LOG_EXCLUDE_SETTING = Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), (s) -> s, true, Setting.Scope.CLUSTER); private final ESLogger tracerLog; @@ -107,8 +104,8 @@ public class TransportService extends AbstractLifecycleComponent tracerLogInclude) { + this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY); } - void setTracelLogExclude(String[] tracelLogExclude) { - this.tracelLogExclude = tracelLogExclude; + void setTracerLogExclude(List tracelLogExclude) { + 
this.tracelLogExclude = tracelLogExclude.toArray(Strings.EMPTY_ARRAY); } @Override protected void doStart() { diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 1640cfdd3b5..1895d3ee326 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -24,6 +24,9 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; @@ -72,7 +75,7 @@ public class SettingTests extends ESTestCase { settingUpdater.apply(build, Settings.EMPTY); fail("not a boolean"); } catch (IllegalArgumentException ex) { - assertEquals("Failed to parse value [I am not a boolean] for setting [foo.bar]", ex.getMessage()); + assertEquals("Failed to parse value [I am not a boolean] cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ]", ex.getMessage()); } } @@ -248,4 +251,55 @@ public class SettingTests extends ESTestCase { assertEquals(1, c.b.intValue()); } + + public void testListSettings() { + Setting> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), true, Setting.Scope.CLUSTER); + List value = listSetting.get(Settings.EMPTY); + assertEquals(1, value.size()); + assertEquals("foo,bar", value.get(0)); + + List input = Arrays.asList("test", "test1, test2", "test", ",,,,"); + Settings.Builder builder = Settings.builder().putArray("foo.bar", input.toArray(new String[0])); + value = listSetting.get(builder.build()); + assertEquals(input.size(), value.size()); + assertArrayEquals(value.toArray(new String[0]), input.toArray(new String[0])); + + // try to parse this really annoying format + builder = 
Settings.builder(); + for (int i = 0; i < input.size(); i++) { + builder.put("foo.bar." + i, input.get(i)); + } + value = listSetting.get(builder.build()); + assertEquals(input.size(), value.size()); + assertArrayEquals(value.toArray(new String[0]), input.toArray(new String[0])); + + AtomicReference> ref = new AtomicReference<>(); + AbstractScopedSettings.SettingUpdater settingUpdater = listSetting.newUpdater(ref::set, logger); + assertTrue(settingUpdater.hasChanged(builder.build(), Settings.EMPTY)); + settingUpdater.apply(builder.build(), Settings.EMPTY); + assertEquals(input.size(), ref.get().size()); + assertArrayEquals(ref.get().toArray(new String[0]), input.toArray(new String[0])); + + settingUpdater.apply(Settings.builder().putArray("foo.bar", "123").build(), builder.build()); + assertEquals(1, ref.get().size()); + assertArrayEquals(ref.get().toArray(new String[0]), new String[] {"123"}); + + settingUpdater.apply(Settings.builder().put("foo.bar", "1,2,3").build(), Settings.builder().putArray("foo.bar", "123").build()); + assertEquals(3, ref.get().size()); + assertArrayEquals(ref.get().toArray(new String[0]), new String[] {"1", "2", "3"}); + + settingUpdater.apply(Settings.EMPTY, Settings.builder().put("foo.bar", "1,2,3").build()); + assertEquals(1, ref.get().size()); + assertEquals("foo,bar", ref.get().get(0)); + + Setting> otherSettings = Setting.listSetting("foo.bar", Collections.emptyList(), Integer::parseInt, true, Setting.Scope.CLUSTER); + List defaultValue = otherSettings.get(Settings.EMPTY); + assertEquals(0, defaultValue.size()); + List intValues = otherSettings.get(Settings.builder().put("foo.bar", "0,1,2,3").build()); + assertEquals(4, intValues.size()); + for (int i = 0; i < intValues.size(); i++) { + assertEquals(i, intValues.get(i).intValue()); + } + + } } From 32881c08b3f1e05c213d0bf0f53a51cdb30c59c9 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 16 Dec 2015 12:00:34 +0100 Subject: [PATCH 085/167] fix more excpption useage --- 
.../main/java/org/elasticsearch/common/settings/Setting.java | 2 ++ .../action/admin/cluster/settings/SettingsUpdaterTests.java | 1 + 2 files changed, 3 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 758bff5c6cc..4e3d708ce21 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -120,6 +120,8 @@ public class Setting extends ToXContentToBytes { return parser.apply(value); } catch (ElasticsearchParseException ex) { throw new IllegalArgumentException(ex.getMessage(), ex); + } catch (NumberFormatException ex) { + throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", ex); } catch (IllegalArgumentException ex) { throw ex; } catch (Exception t) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java index fe4f8bbdf10..bd1377b89fe 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java @@ -91,6 +91,7 @@ public class SettingsUpdaterTests extends ESTestCase { Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build()); fail("all or nothing"); } catch (IllegalArgumentException ex) { + logger.info("", ex); assertEquals("Failed to parse value [not a float] for setting [cluster.routing.allocation.balance.index]", ex.getMessage()); } assertNull("updater only does a dryRun", index.get()); From e586c966becd3ef7428cd9c56478749db5e2fb5c Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 16 Dec 2015 
12:08:48 +0100 Subject: [PATCH 086/167] fix several .getKey usages --- .../indices/memory/breaker/CircuitBreakerNoopIT.java | 4 ++-- .../main/java/org/elasticsearch/test/InternalTestCluster.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java index 08df60126a5..b1b56acd8cd 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java @@ -41,10 +41,10 @@ public class CircuitBreakerNoopIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, "noop") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop") // This is set low, because if the "noop" is not a noop, it will break .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, "noop") + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop") // This is set low, because if the "noop" is not a noop, it will break .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") .build(); diff --git a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 2e89cc48312..06724d2c7d9 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -416,8 +416,8 @@ public final class InternalTestCluster extends TestCluster { } if 
(random.nextInt(10) == 0) { - builder.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, "noop"); - builder.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, "noop"); + builder.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop"); + builder.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop"); } if (random.nextBoolean()) { From e5dc124811f989e0b3b43b8cb8f1bdf981d15338 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 16 Dec 2015 12:27:07 +0100 Subject: [PATCH 087/167] apply review from @bleskes --- .../index/translog/Translog.java | 34 +++++++------------ .../index/translog/TranslogTests.java | 25 +++++++++++++- 2 files changed, 37 insertions(+), 22 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 6329e5cc998..a105f652728 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -421,13 +421,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return location; } } catch (AlreadyClosedException | IOException ex) { - if (current.getTragicException() != null) { - try { - close(); - } catch (Exception inner) { - ex.addSuppressed(inner); - } - } + closeOnTragicEvent(ex); throw ex; } catch (Throwable e) { throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e); @@ -507,13 +501,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC current.sync(); } } catch (AlreadyClosedException | IOException ex) { - if (current.getTragicException() != null) { - try { - close(); - } catch (Exception inner) { - ex.addSuppressed(inner); - } - } + closeOnTragicEvent(ex); throw ex; } } @@ -546,18 +534,22 @@ public class Translog extends 
AbstractIndexShardComponent implements IndexShardC return current.syncUpTo(location.translogLocation + location.size); } } catch (AlreadyClosedException | IOException ex) { - if (current.getTragicException() != null) { - try { - close(); - } catch (Exception inner) { - ex.addSuppressed(inner); - } - } + closeOnTragicEvent(ex); throw ex; } return false; } + private void closeOnTragicEvent(Throwable ex) { + if (current.getTragicException() != null) { + try { + close(); + } catch (Exception inner) { + ex.addSuppressed(inner); + } + } + } + /** * return stats */ diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 3ede7355524..51de00f74a6 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -1308,6 +1308,9 @@ public class TranslogTests extends ESTestCase { locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); translog.sync(); opsSynced++; + } catch (MockDirectoryWrapper.FakeIOException ex) { + failed = true; + assertFalse(translog.isOpen()); } catch (IOException ex) { failed = true; assertFalse(translog.isOpen()); @@ -1322,7 +1325,11 @@ public class TranslogTests extends ESTestCase { fail("we are already closed"); } catch (AlreadyClosedException ex) { assertNotNull(ex.getCause()); - assertEquals(ex.getCause().getMessage(), "__FAKE__ no space left on device"); + if (ex.getCause() instanceof MockDirectoryWrapper.FakeIOException) { + assertNull(ex.getCause().getMessage()); + } else { + assertEquals(ex.getCause().getMessage(), "__FAKE__ no space left on device"); + } } } @@ -1423,6 +1430,22 @@ public class TranslogTests extends ESTestCase { for (int i = 0; i < threadCount; i++) { threads[i].join(); } + boolean atLeastOneFailed = false; + for (Throwable ex : threadExceptions) 
{ + if (ex != null) { + atLeastOneFailed = true; + break; + } + } + if (atLeastOneFailed == false) { + try { + boolean syncNeeded = translog.syncNeeded(); + translog.close(); + assertFalse("should have failed if sync was needed", syncNeeded); + } catch (IOException ex) { + // boom now we failed + } + } Collections.sort(writtenOperations, (a, b) -> a.location.compareTo(b.location)); assertFalse(translog.isOpen()); final Checkpoint checkpoint = Checkpoint.read(config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME)); From 4597a22ace8f51569590c18b81a6616f49fa2e70 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Wed, 16 Dec 2015 12:52:28 +0100 Subject: [PATCH 088/167] Merge pull request #15473 from jmluy/patch-1 Update sample in sort for consistency --- docs/reference/search/request/sort.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index 8d0b6708979..14ab207c301 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -238,7 +238,7 @@ Format in `lat,lon`. "sort" : [ { "_geo_distance" : { - "pin.location" : "-70,40", + "pin.location" : "40,-70", "order" : "asc", "unit" : "km" } @@ -301,7 +301,7 @@ Multiple geo points can be passed as an array containing any `geo_point` format, [source,js] -------------------------------------------------- "pin.location" : [[-70, 40], [-71, 42]] -"pin.location" : [{"lat": -70, "lon": 40}, {"lat": -71, "lon": 42}] +"pin.location" : [{"lat": 40, "lon": -70}, {"lat": 42, "lon": -71}] -------------------------------------------------- and so forth. 
From c1ff619781b935ca9c8879ffeaf768bbc179e1e8 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 16 Dec 2015 14:13:22 +0100 Subject: [PATCH 089/167] fix cluster settings to accept complex list settigns --- .../settings/AbstractScopedSettings.java | 7 +++---- .../elasticsearch/common/settings/Setting.java | 17 ++++++++++++++--- .../common/settings/ScopedSettingsTests.java | 14 ++++++++++++++ .../common/settings/SettingTests.java | 18 ++++++++++++++++++ 4 files changed, 49 insertions(+), 7 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index c7361e75b1d..1efc9d3fed6 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.settings; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.logging.ESLoggerFactory; import java.util.*; import java.util.function.BiConsumer; @@ -34,7 +33,7 @@ import java.util.function.Consumer; public abstract class AbstractScopedSettings extends AbstractComponent { private Settings lastSettingsApplied = Settings.EMPTY; private final List settingUpdaters = new ArrayList<>(); - private final Map> groupSettings = new HashMap<>(); + private final Map> complexMatchers = new HashMap<>(); private final Map> keySettings = new HashMap<>(); private final Setting.Scope scope; @@ -45,7 +44,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope()); } if (entry.isGroupSetting()) { - groupSettings.put(entry.getKey(), entry); + complexMatchers.put(entry.getKey(), entry); } else { keySettings.put(entry.getKey(), entry); } @@ -216,7 +215,7 
@@ public abstract class AbstractScopedSettings extends AbstractComponent { public Setting get(String key) { Setting setting = keySettings.get(key); if (setting == null) { - for (Map.Entry> entry : groupSettings.entrySet()) { + for (Map.Entry> entry : complexMatchers.entrySet()) { if (entry.getValue().match(key)) { return entry.getValue(); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 4e3d708ce21..884479d7cb6 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -37,6 +37,7 @@ import java.util.List; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; +import java.util.regex.Pattern; /** */ @@ -95,6 +96,10 @@ public class Setting extends ToXContentToBytes { return false; } + boolean hasComplexMatcher() { + return isGroupSetting(); + } + /** * Returns the default values string representation for this setting. * @param settings a settings object for settings that has a default value depending on another setting if available @@ -143,7 +148,7 @@ public class Setting extends ToXContentToBytes { * @see #isGroupSetting() */ public boolean match(String toTest) { - return Regex.simpleMatch(key, toTest); + return key.equals(toTest); } @Override @@ -326,15 +331,21 @@ public class Setting extends ToXContentToBytes { } }; return new Setting>(key, arrayToParsableString(defaultStringValue.toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) { - + private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?"); @Override public String getRaw(Settings settings) { String[] array = settings.getAsArray(key, null); - return array == null ? 
defaultValue.apply(settings) : arrayToParsableString(array); } + public boolean match(String toTest) { + return pattern.matcher(toTest).matches(); + } + @Override + boolean hasComplexMatcher() { + return true; + } }; } diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 9c5320b1354..b54784dc16d 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -21,12 +21,15 @@ package org.elasticsearch.common.settings; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; public class ScopedSettingsTests extends ESTestCase { @@ -138,4 +141,15 @@ public class ScopedSettingsTests extends ESTestCase { assertEquals(diff.getAsMap().size(), 1); assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(17)); } + + public void testUpdateTracer() { + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AtomicReference> ref = new AtomicReference<>(); + settings.addSettingsUpdateConsumer(TransportService.TRACE_LOG_INCLUDE_SETTING, ref::set); + settings.applySettings(Settings.builder().putArray("transport.tracer.include", "internal:index/shard/recovery/*", "internal:gateway/local*").build()); + assertNotNull(ref.get().size()); + assertEquals(ref.get().size(), 2); + assertTrue(ref.get().contains("internal:index/shard/recovery/*")); + 
assertTrue(ref.get().contains("internal:gateway/local*")); + } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 1895d3ee326..069418a7e1d 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -300,6 +300,24 @@ public class SettingTests extends ESTestCase { for (int i = 0; i < intValues.size(); i++) { assertEquals(i, intValues.get(i).intValue()); } + } + + public void testListSettingAcceptsNumberSyntax() { + Setting> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), true, Setting.Scope.CLUSTER); + List input = Arrays.asList("test", "test1, test2", "test", ",,,,"); + Settings.Builder builder = Settings.builder().putArray("foo.bar", input.toArray(new String[0])); + // try to parse this really annoying format + for (String key : builder.internalMap().keySet()) { + assertTrue("key: " + key + " doesn't match", listSetting.match(key)); + } + builder = Settings.builder().put("foo.bar", "1,2,3"); + for (String key : builder.internalMap().keySet()) { + assertTrue("key: " + key + " doesn't match", listSetting.match(key)); + } + assertFalse(listSetting.match("foo_bar")); + assertFalse(listSetting.match("foo_bar.1")); + assertTrue(listSetting.match("foo.bar")); + assertTrue(listSetting.match("foo.bar." + randomIntBetween(0,10000))); } } From 8ac8c1f54770049ce35034ed3bede66fa6add9fd Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 16 Dec 2015 15:54:11 +0100 Subject: [PATCH 090/167] Make mapping serialization more robust. When creating a metadata mapper for a new type, we reuse an existing configuration from an existing type (if any) in order to avoid introducing conflicts. However this field type that is provided is considered as both an initial configuration and the default configuration. 
So at serialization time, we might only serialize the difference between the current configuration and this default configuration, which might be different to what is actually considered the default configuration. This does not cause bugs today because metadata mappers usually override the toXContent method and compare the current field type with Defaults.FIELD_TYPE instead of defaultFieldType() but I would still like to do this change to avoid future bugs. --- .../main/java/org/elasticsearch/index/mapper/FieldMapper.java | 4 ++-- .../org/elasticsearch/index/mapper/MetadataFieldMapper.java | 4 ++-- .../elasticsearch/index/mapper/core/BinaryFieldMapper.java | 2 +- .../elasticsearch/index/mapper/core/BooleanFieldMapper.java | 2 +- .../index/mapper/core/CompletionFieldMapper.java | 2 +- .../elasticsearch/index/mapper/core/NumberFieldMapper.java | 2 +- .../elasticsearch/index/mapper/core/StringFieldMapper.java | 2 +- .../index/mapper/geo/BaseGeoPointFieldMapper.java | 2 +- .../elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java | 2 +- .../elasticsearch/index/mapper/internal/AllFieldMapper.java | 2 +- .../index/mapper/internal/FieldNamesFieldMapper.java | 2 +- .../elasticsearch/index/mapper/internal/IdFieldMapper.java | 2 +- .../elasticsearch/index/mapper/internal/IndexFieldMapper.java | 2 +- .../index/mapper/internal/ParentFieldMapper.java | 2 +- .../index/mapper/internal/RoutingFieldMapper.java | 2 +- .../index/mapper/internal/SourceFieldMapper.java | 2 +- .../elasticsearch/index/mapper/internal/TTLFieldMapper.java | 2 +- .../index/mapper/internal/TimestampFieldMapper.java | 2 +- .../elasticsearch/index/mapper/internal/TypeFieldMapper.java | 2 +- .../elasticsearch/index/mapper/internal/UidFieldMapper.java | 2 +- .../index/mapper/internal/VersionFieldMapper.java | 2 +- .../index/mapper/externalvalues/ExternalMapper.java | 2 +- .../index/mapper/externalvalues/ExternalMetadataMapper.java | 2 +- .../index/mapper/internal/FieldNamesFieldMapperTests.java | 2 +- 
.../elasticsearch/mapper/attachments/AttachmentMapper.java | 2 +- .../org/elasticsearch/index/mapper/size/SizeFieldMapper.java | 2 +- 26 files changed, 28 insertions(+), 28 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 6f447cdeb86..30df3562aec 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -64,10 +64,10 @@ public abstract class FieldMapper extends Mapper implements Cloneable { protected final MultiFields.Builder multiFieldsBuilder; protected CopyTo copyTo; - protected Builder(String name, MappedFieldType fieldType) { + protected Builder(String name, MappedFieldType fieldType, MappedFieldType defaultFieldType) { super(name); this.fieldType = fieldType.clone(); - this.defaultFieldType = fieldType.clone(); + this.defaultFieldType = defaultFieldType.clone(); this.defaultOptions = fieldType.indexOptions(); // we have to store it the fieldType is mutable multiFieldsBuilder = new MultiFields.Builder(); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index 2f3b40126ed..622c7729dd4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -51,8 +51,8 @@ public abstract class MetadataFieldMapper extends FieldMapper { } public abstract static class Builder extends FieldMapper.Builder { - public Builder(String name, MappedFieldType fieldType) { - super(name, fieldType); + public Builder(String name, MappedFieldType fieldType, MappedFieldType defaultFieldType) { + super(name, fieldType, defaultFieldType); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java 
b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java index 7468f4fb2f6..0ee311678ef 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java @@ -72,7 +72,7 @@ public class BinaryFieldMapper extends FieldMapper { public static class Builder extends FieldMapper.Builder { public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java index cd76fdbb047..e381bc9c60b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java @@ -72,7 +72,7 @@ public class BooleanFieldMapper extends FieldMapper { public static class Builder extends FieldMapper.Builder { public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); this.builder = this; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index 9d465b4cffc..69177401db7 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -356,7 +356,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp * @param name of the completion field to build */ public Builder(String name) { - super(name, new CompletionFieldType()); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java 
b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index 04dd1a21335..ed537aa7e5f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -66,7 +66,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM private Boolean coerce; public Builder(String name, MappedFieldType fieldType, int defaultPrecisionStep) { - super(name, fieldType); + super(name, fieldType, fieldType); this.fieldType.setNumericPrecisionStep(defaultPrecisionStep); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java index 061d3a2e343..08582c65997 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java @@ -98,7 +98,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc protected int ignoreAbove = Defaults.IGNORE_ABOVE; public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java index f7910e1f5af..0bbe2fe8f1b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java @@ -94,7 +94,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr protected Boolean ignoreMalformed; public Builder(String name, GeoPointFieldType fieldType) { - super(name, fieldType); + super(name, fieldType, fieldType); } @Override diff --git 
a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index 7c100a306c2..a99517f4003 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -120,7 +120,7 @@ public class GeoShapeFieldMapper extends FieldMapper { private Boolean coerce; public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java index 4676c63e793..bcd094d2ae6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java @@ -101,7 +101,7 @@ public class AllFieldMapper extends MetadataFieldMapper { private EnabledAttributeMapper enabled = Defaults.ENABLED; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); builder = this; indexName = Defaults.INDEX_NAME; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java index 7883415e59a..e03439f3f54 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java @@ -78,7 +78,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { private boolean enabled = Defaults.ENABLED; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java index a0b7cddae76..0fe3e10bcb8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java @@ -89,7 +89,7 @@ public class IdFieldMapper extends MetadataFieldMapper { private String path = Defaults.PATH; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java index 167807f3b2a..dbbf03b72e2 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java @@ -79,7 +79,7 @@ public class IndexFieldMapper extends MetadataFieldMapper { private EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index 6142bf475ec..65daef2a834 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -97,7 +97,7 @@ public class ParentFieldMapper extends MetadataFieldMapper { private final MappedFieldType childJoinFieldType = Defaults.JOIN_FIELD_TYPE.clone(); public Builder(String documentType) { - super(Defaults.NAME, Defaults.FIELD_TYPE); + super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); this.indexName = name; this.documentType = documentType; builder = this; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java index e791ad376c3..40b7e6871c4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java +++ 
b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java @@ -77,7 +77,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper { private String path = Defaults.PATH; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); } public Builder required(boolean required) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index 4d47c3bf446..40bf9eb0c8e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -88,7 +88,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { private String[] excludes = null; public Builder() { - super(Defaults.NAME, Defaults.FIELD_TYPE); + super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); } public Builder enabled(boolean enabled) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java index 7a17e56e7dd..f99ca18600a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java @@ -78,7 +78,7 @@ public class TTLFieldMapper extends MetadataFieldMapper { private long defaultTTL = Defaults.DEFAULT; public Builder() { - super(Defaults.NAME, Defaults.TTL_FIELD_TYPE); + super(Defaults.NAME, Defaults.TTL_FIELD_TYPE, Defaults.FIELD_TYPE); } public Builder enabled(EnabledAttributeMapper enabled) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java 
b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java index 0657d67857b..b0606f1994f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java @@ -96,7 +96,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper { private Boolean ignoreMissing = null; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); if (existing != null) { // if there is an existing type, always use that store value (only matters for < 2.0) explicitStore = true; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java index a140593943f..c529db5183e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java @@ -80,7 +80,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder { public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java index 1cf3b9d9ac3..10f9880d97d 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java @@ -78,7 +78,7 @@ public class UidFieldMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder { public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java index d9659f40c22..6b1471afda7 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java @@ -61,7 +61,7 @@ public class VersionFieldMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder { public Builder() { - super(Defaults.NAME, Defaults.FIELD_TYPE); + super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); } @Override diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java index e7b7c26b84f..6f7541a272a 100755 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java @@ -81,7 +81,7 @@ public class ExternalMapper extends 
FieldMapper { private String mapperName; public Builder(String name, String generatedValue, String mapperName) { - super(name, new ExternalFieldType()); + super(name, new ExternalFieldType(), new ExternalFieldType()); this.builder = this; this.stringBuilder = stringField(name).store(false); this.generatedValue = generatedValue; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java index 2731e30a84e..8bdb5670dbb 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java @@ -98,7 +98,7 @@ public class ExternalMetadataMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder { protected Builder() { - super(CONTENT_TYPE, FIELD_TYPE); + super(CONTENT_TYPE, FIELD_TYPE, FIELD_TYPE); } @Override diff --git a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java index 3f3c5702e8c..f97b22e0ecb 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java @@ -204,7 +204,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { @Override public Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - return new MetadataFieldMapper.Builder("_dummy", FIELD_TYPE) { + return new MetadataFieldMapper.Builder("_dummy", FIELD_TYPE, FIELD_TYPE) { @Override public DummyMetadataFieldMapper build(BuilderContext context) { return new DummyMetadataFieldMapper(context.indexSettings()); diff --git 
a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java index 66ecbe0850b..ffae8205e33 100644 --- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java +++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java @@ -131,7 +131,7 @@ public class AttachmentMapper extends FieldMapper { private Mapper.Builder languageBuilder = stringField(FieldNames.LANGUAGE); public Builder(String name) { - super(name, new AttachmentFieldType()); + super(name, new AttachmentFieldType(), new AttachmentFieldType()); this.builder = this; this.contentBuilder = stringField(FieldNames.CONTENT); } diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index fb5d47bdf69..1e27e18bac7 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -66,7 +66,7 @@ public class SizeFieldMapper extends MetadataFieldMapper { protected EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED; public Builder(MappedFieldType existing) { - super(NAME, existing == null ? Defaults.SIZE_FIELD_TYPE : existing); + super(NAME, existing == null ? 
Defaults.SIZE_FIELD_TYPE : existing, Defaults.SIZE_FIELD_TYPE); builder = this; } From 89a09b9bedc7665abc9b6c5dcadb3f487a6626de Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 15 Dec 2015 21:45:47 -0500 Subject: [PATCH 091/167] Master should wait on cluster state publication when failing a shard When a client sends a request to fail a shard to the master, the current behavior is that the master will submit the cluster state update task and then immediately send a successful response back to the client; additionally, if there are any failures while processing the cluster state update task to fail the shard, then the client will never be notified of these failures. This commit modifies the master behavior when handling requests to fail a shard. In particular, the master will now wait until successful publication of the cluster state update before notifying the request client that the shard is marked as failed; additionally, the client is now notified of any failures during the execution of the cluster state update task. 
Relates #14252 --- .../action/shard/ShardStateAction.java | 73 +++++++++++++------ 1 file changed, 51 insertions(+), 22 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index c2ac791aa16..3a01ced6ebf 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingService; @@ -53,6 +54,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry; @@ -113,7 +115,7 @@ public class ShardStateAction extends AbstractComponent { @Override public void handleException(TransportException exp) { - logger.warn("failed to send failed shard to {}", exp, masterNode); + logger.warn("unexpected failure while sending request to [{}] to fail shard [{}]", exp, masterNode, shardRoutingEntry); listener.onShardFailedFailure(masterNode, exp); } }); @@ -122,22 +124,62 @@ public class ShardStateAction extends AbstractComponent { private class ShardFailedTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { - handleShardFailureOnMaster(request); - channel.sendResponse(TransportResponse.Empty.INSTANCE); + 
handleShardFailureOnMaster(request, new ClusterStateTaskListener() { + @Override + public void onFailure(String source, Throwable t) { + logger.error("unexpected failure while failing shard [{}]", t, request.shardRouting); + try { + channel.sendResponse(t); + } catch (Throwable channelThrowable) { + logger.warn("failed to send failure [{}] while failing shard [{}]", channelThrowable, t, request.shardRouting); + } + } + + @Override + public void onNoLongerMaster(String source) { + logger.error("no longer master while failing shard [{}]", request.shardRouting); + try { + channel.sendResponse(new NotMasterException(source)); + } catch (Throwable channelThrowable) { + logger.warn("failed to send no longer master while failing shard [{}]", channelThrowable, request.shardRouting); + } + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + try { + int numberOfUnassignedShards = newState.getRoutingNodes().unassigned().size(); + if (oldState != newState && numberOfUnassignedShards > 0) { + String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shard [%s]", numberOfUnassignedShards, request.shardRouting); + if (logger.isTraceEnabled()) { + logger.trace(reason + ", scheduling a reroute"); + } + routingService.reroute(reason); + } + } finally { + try { + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } catch (Throwable channelThrowable) { + logger.warn("failed to send response while failing shard [{}]", channelThrowable, request.shardRouting); + } + } + } + } + ); } } - class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor, ClusterStateTaskListener { + class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor { @Override public BatchResult execute(ClusterState currentState, List tasks) throws Exception { BatchResult.Builder batchResultBuilder = BatchResult.builder(); - List shardRoutingsToBeApplied = new ArrayList<>(tasks.size()); + List 
failedShards = new ArrayList<>(tasks.size()); for (ShardRoutingEntry task : tasks) { - shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure)); + failedShards.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure)); } ClusterState maybeUpdatedState = currentState; try { - RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied); + RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, failedShards); if (result.changed()) { maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build(); } @@ -147,31 +189,18 @@ public class ShardStateAction extends AbstractComponent { } return batchResultBuilder.build(maybeUpdatedState); } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) { - logger.trace("unassigned shards after shard failures. 
scheduling a reroute."); - routingService.reroute("unassigned shards after shard failures, scheduling a reroute"); - } - } - - @Override - public void onFailure(String source, Throwable t) { - logger.error("unexpected failure during [{}]", t, source); - } } private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler(); - private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) { + private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry, ClusterStateTaskListener listener) { logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); clusterService.submitStateUpdateTask( "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", shardRoutingEntry, ClusterStateTaskConfig.build(Priority.HIGH), shardFailedClusterStateHandler, - shardFailedClusterStateHandler); + listener); } public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) { From 02354fc491c2767448b21150b6ed0e58dfefb322 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 16 Dec 2015 10:53:59 -0500 Subject: [PATCH 092/167] More useful assertions in o.e.c.s.InternalClusterService --- .../cluster/service/InternalClusterService.java | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index afdfea65328..b8c898a31e9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -72,6 +72,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import 
java.util.Queue; import java.util.concurrent.ConcurrentMap; @@ -442,9 +443,15 @@ public class InternalClusterService extends AbstractLifecycleComponent finalBatchResult = batchResult; - assert toExecute.stream().map(updateTask -> updateTask.task).allMatch(finalBatchResult.executionResults::containsKey); + assert batchResult.executionResults.size() == toExecute.size() + : String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", toExecute.size(), toExecute.size() == 1 ? "" : "s", batchResult.executionResults.size()); + boolean assertsEnabled = false; + assert (assertsEnabled = true); + if (assertsEnabled) { + for (UpdateTask updateTask : toExecute) { + assert batchResult.executionResults.containsKey(updateTask.task) : "missing task result for [" + updateTask.task + "]"; + } + } ClusterState newClusterState = batchResult.resultingState; final ArrayList> proccessedListeners = new ArrayList<>(); From 99f9bd7cfe0191214dc95a02d817874e1ecafa5b Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 16 Dec 2015 14:40:16 +0100 Subject: [PATCH 093/167] Throw exception when trying to write map with null keys Closes #14346 --- .../common/xcontent/XContentBuilder.java | 28 ++++----------- .../builder/XContentBuilderTests.java | 34 ++++++++++++++++--- 2 files changed, 36 insertions(+), 26 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index af8e7534692..d26485a121d 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -53,7 +53,7 @@ import java.util.Map; */ public final class XContentBuilder implements BytesStream, Releasable { - public static enum FieldCaseConversion { + public enum FieldCaseConversion { /** * No conversion will occur. 
*/ @@ -251,14 +251,7 @@ public final class XContentBuilder implements BytesStream, Releasable { } public XContentBuilder field(XContentBuilderString name) throws IOException { - if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) { - generator.writeFieldName(name.underscore()); - } else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) { - generator.writeFieldName(name.camelCase()); - } else { - generator.writeFieldName(name.underscore()); - } - return this; + return field(name, fieldCaseConversion); } public XContentBuilder field(XContentBuilderString name, FieldCaseConversion conversion) throws IOException { @@ -273,22 +266,13 @@ public final class XContentBuilder implements BytesStream, Releasable { } public XContentBuilder field(String name) throws IOException { - if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) { - if (cachedStringBuilder == null) { - cachedStringBuilder = new StringBuilder(); - } - name = Strings.toUnderscoreCase(name, cachedStringBuilder); - } else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) { - if (cachedStringBuilder == null) { - cachedStringBuilder = new StringBuilder(); - } - name = Strings.toCamelCase(name, cachedStringBuilder); - } - generator.writeFieldName(name); - return this; + return field(name, fieldCaseConversion); } public XContentBuilder field(String name, FieldCaseConversion conversion) throws IOException { + if (name == null) { + throw new IllegalArgumentException("field name cannot be null"); + } if (conversion == FieldCaseConversion.UNDERSCORE) { if (cachedStringBuilder == null) { cachedStringBuilder = new StringBuilder(); diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java index 7ffafc004ab..9129e3c05b3 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.common.xcontent.builder; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.io.FastCharArrayWriter; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,6 +38,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; +import java.util.Collections; import java.util.Date; import java.util.GregorianCalendar; import java.util.HashMap; @@ -51,9 +51,6 @@ import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConvers import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.UNDERSCORE; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class XContentBuilderTests extends ESTestCase { public void testPrettyWithLfAtEnd() throws Exception { ByteArrayOutputStream os = new ByteArrayOutputStream(); @@ -350,4 +347,33 @@ public class XContentBuilderTests extends ESTestCase { "}", string.trim()); } + public void testWriteMapWithNullKeys() throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + try { + builder.map(Collections.singletonMap(null, "test")); + fail("write map should have failed"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("field name cannot be null")); + } + } + + public void testWriteMapValueWithNullKeys() throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + try { + builder.value(Collections.singletonMap(null, "test")); + fail("write map should have failed"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), 
equalTo("field name cannot be null")); + } + } + + public void testWriteFieldMapWithNullKeys() throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + try { + builder.field("map", Collections.singletonMap(null, "test")); + fail("write map should have failed"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("field name cannot be null")); + } + } } From db357f078aa1f817fc4078277e0a49b35079281f Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Wed, 16 Dec 2015 16:36:50 +0100 Subject: [PATCH 094/167] Merge pull request #15396 from brwe/java-api-for-synced-flush Add java API for synced flush closes #12812 --- .../elasticsearch/action/ActionModule.java | 3 + .../indices/flush/SyncedFlushAction.java | 44 +++++ .../indices/flush/SyncedFlushRequest.java | 64 +++++++ .../flush/SyncedFlushRequestBuilder.java | 41 +++++ .../indices/flush/SyncedFlushResponse.java} | 97 +++++++++-- .../flush/TransportSyncedFlushAction.java | 52 ++++++ .../client/IndicesAdminClient.java | 30 +++- .../org/elasticsearch/client/Requests.java | 14 +- .../client/support/AbstractClient.java | 19 +++ .../flush/ShardsSyncedFlushResult.java | 56 +++++- .../indices/flush/SyncedFlushService.java | 86 +++++----- .../indices/flush/RestSyncedFlushAction.java | 17 +- .../indices/flush/SyncedFlushUnitTests.java | 84 +++++++-- .../gateway/RecoveryFromGatewayIT.java | 2 +- .../gateway/ReusePeerRecoverySharedTest.java | 161 ++++++++++++++++++ .../elasticsearch/indices/flush/FlushIT.java | 9 +- .../flush/SyncedFlushSingleNodeTests.java | 4 +- .../indices/flush/SyncedFlushUtil.java | 24 +-- .../elasticsearch/test/ESIntegTestCase.java | 19 +-- 19 files changed, 696 insertions(+), 130 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java create mode 100644 core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java create mode 100644 
core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java rename core/src/main/java/org/elasticsearch/{indices/flush/IndicesSyncedFlushResult.java => action/admin/indices/flush/SyncedFlushResponse.java} (64%) create mode 100644 core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java rename core/src/test/java/org/elasticsearch/{ => action/admin}/indices/flush/SyncedFlushUnitTests.java (51%) create mode 100644 core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index 88ccb809712..adcb873e838 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -107,6 +107,8 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction; +import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; @@ -293,6 +295,7 @@ public class ActionModule extends AbstractModule { registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class); registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class); registerAction(FlushAction.INSTANCE, TransportFlushAction.class); + registerAction(SyncedFlushAction.INSTANCE, 
TransportSyncedFlushAction.class); registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class); registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class); registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java new file mode 100644 index 00000000000..291fd49c63a --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + + +public class SyncedFlushAction extends Action { + + public static final SyncedFlushAction INSTANCE = new SyncedFlushAction(); + public static final String NAME = "indices:admin/synced_flush"; + + private SyncedFlushAction() { + super(NAME); + } + + @Override + public SyncedFlushResponse newResponse() { + return new SyncedFlushResponse(); + } + + @Override + public SyncedFlushRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new SyncedFlushRequestBuilder(client, this); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java new file mode 100644 index 00000000000..59719fe8877 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; + +import java.util.Arrays; + +/** + * A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush + * and writes the same sync id to primary and all copies. + * + *

Best created with {@link org.elasticsearch.client.Requests#syncedFlushRequest(String...)}.

+ * + * @see org.elasticsearch.client.Requests#flushRequest(String...) + * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) + * @see SyncedFlushResponse + */ +public class SyncedFlushRequest extends BroadcastRequest { + + public SyncedFlushRequest() { + } + + /** + * Copy constructor that creates a new synced flush request that is a copy of the one provided as an argument. + * The new request will inherit though headers and context from the original request that caused it. + */ + public SyncedFlushRequest(ActionRequest originalRequest) { + super(originalRequest); + } + + /** + * Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will + * be sync flushed. + */ + public SyncedFlushRequest(String... indices) { + super(indices); + } + + + @Override + public String toString() { + return "SyncedFlushRequest{" + + "indices=" + Arrays.toString(indices) + "}"; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java new file mode 100644 index 00000000000..9e407260811 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.ElasticsearchClient; + +public class SyncedFlushRequestBuilder extends ActionRequestBuilder { + + public SyncedFlushRequestBuilder(ElasticsearchClient client, SyncedFlushAction action) { + super(client, action, new SyncedFlushRequest()); + } + + public SyncedFlushRequestBuilder setIndices(String[] indices) { + super.request().indices(indices); + return this; + } + + public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { + super.request().indicesOptions(indicesOptions); + return this; + } +} diff --git a/core/src/main/java/org/elasticsearch/indices/flush/IndicesSyncedFlushResult.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java similarity index 64% rename from core/src/main/java/org/elasticsearch/indices/flush/IndicesSyncedFlushResult.java rename to core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java index 435c0d138cd..5925370e5f7 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/IndicesSyncedFlushResult.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java @@ -16,16 +16,25 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.indices.flush; +package org.elasticsearch.action.admin.indices.flush; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; +import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; +import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -34,13 +43,16 @@ import static java.util.Collections.unmodifiableMap; /** * The result of performing a sync flush operation on all shards of multiple indices */ -public class IndicesSyncedFlushResult implements ToXContent { +public class SyncedFlushResponse extends ActionResponse implements ToXContent { - final Map> shardsResultPerIndex; - final ShardCounts shardCounts; + Map> shardsResultPerIndex; + ShardCounts shardCounts; + SyncedFlushResponse() { - public IndicesSyncedFlushResult(Map> shardsResultPerIndex) { + } + + public SyncedFlushResponse(Map> shardsResultPerIndex) { // shardsResultPerIndex is never modified after it is passed to this // constructor so this is safe even though shardsResultPerIndex is a // ConcurrentHashMap @@ -48,17 +60,23 @@ public class IndicesSyncedFlushResult implements ToXContent { this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values())); } - /** total number shards, including replicas, both assigned and unassigned */ + /** + * total number shards, including replicas, both assigned and 
unassigned + */ public int totalShards() { return shardCounts.total; } - /** total number of shards for which the operation failed */ + /** + * total number of shards for which the operation failed + */ public int failedShards() { return shardCounts.failed; } - /** total number of shards which were successfully sync-flushed */ + /** + * total number of shards which were successfully sync-flushed + */ public int successfulShards() { return shardCounts.successful; } @@ -91,8 +109,8 @@ public class IndicesSyncedFlushResult implements ToXContent { builder.endObject(); continue; } - Map failedShards = shardResults.failedShards(); - for (Map.Entry shardEntry : failedShards.entrySet()) { + Map failedShards = shardResults.failedShards(); + for (Map.Entry shardEntry : failedShards.entrySet()) { builder.startObject(); builder.field(Fields.SHARD, shardResults.shardId().id()); builder.field(Fields.REASON, shardEntry.getValue().failureReason()); @@ -123,11 +141,11 @@ public class IndicesSyncedFlushResult implements ToXContent { return new ShardCounts(total, successful, failed); } - static final class ShardCounts implements ToXContent { + static final class ShardCounts implements ToXContent, Streamable { - public final int total; - public final int successful; - public final int failed; + public int total; + public int successful; + public int failed; ShardCounts(int total, int successful, int failed) { this.total = total; @@ -135,6 +153,10 @@ public class IndicesSyncedFlushResult implements ToXContent { this.failed = failed; } + ShardCounts() { + + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(Fields.TOTAL, total); @@ -142,6 +164,20 @@ public class IndicesSyncedFlushResult implements ToXContent { builder.field(Fields.FAILED, failed); return builder; } + + @Override + public void readFrom(StreamInput in) throws IOException { + total = in.readInt(); + successful = in.readInt(); + failed = in.readInt(); + 
} + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(total); + out.writeInt(successful); + out.writeInt(failed); + } } static final class Fields { @@ -154,4 +190,37 @@ public class IndicesSyncedFlushResult implements ToXContent { static final XContentBuilderString ROUTING = new XContentBuilderString("routing"); static final XContentBuilderString REASON = new XContentBuilderString("reason"); } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + shardCounts = new ShardCounts(); + shardCounts.readFrom(in); + Map> tmpShardsResultPerIndex = new HashMap<>(); + int numShardsResults = in.readInt(); + for (int i =0 ; i< numShardsResults; i++) { + String index = in.readString(); + List shardsSyncedFlushResults = new ArrayList<>(); + int numShards = in.readInt(); + for (int j =0; j< numShards; j++) { + shardsSyncedFlushResults.add(ShardsSyncedFlushResult.readShardsSyncedFlushResult(in)); + } + tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults); + } + shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + shardCounts.writeTo(out); + out.writeInt(shardsResultPerIndex.size()); + for (Map.Entry> entry : shardsResultPerIndex.entrySet()) { + out.writeString(entry.getKey()); + out.writeInt(entry.getValue().size()); + for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) { + shardsSyncedFlushResult.writeTo(out); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java new file mode 100644 index 00000000000..3ba354f4629 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under 
one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.flush.SyncedFlushService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +/** + * Synced flush Action. 
+ */ +public class TransportSyncedFlushAction extends HandledTransportAction { + + SyncedFlushService syncedFlushService; + + @Inject + public TransportSyncedFlushAction(Settings settings, ThreadPool threadPool, + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + SyncedFlushService syncedFlushService) { + super(settings, SyncedFlushAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SyncedFlushRequest::new); + this.syncedFlushService = syncedFlushService; + } + + @Override + protected void doExecute(SyncedFlushRequest request, ActionListener listener) { + syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener); + } +} diff --git a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java index 15def3b273e..73eed43352b 100644 --- a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java +++ b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java @@ -53,8 +53,8 @@ import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; @@ -82,11 +82,14 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest import 
org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse; @@ -390,6 +393,29 @@ public interface IndicesAdminClient extends ElasticsearchClient { */ FlushRequestBuilder prepareFlush(String... indices); + /** + * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). + * + * @param request The sync flush request + * @return A result future + * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...) + */ + ActionFuture syncedFlush(SyncedFlushRequest request); + + /** + * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). + * + * @param request The sync flush request + * @param listener A listener to be notified with a result + * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...) 
+ */ + void syncedFlush(SyncedFlushRequest request, ActionListener listener); + + /** + * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). + */ + SyncedFlushRequestBuilder prepareSyncedFlush(String... indices); + /** * Explicitly force merge one or more indices into a the number of segments. * diff --git a/core/src/main/java/org/elasticsearch/client/Requests.java b/core/src/main/java/org/elasticsearch/client/Requests.java index 7f0decaba52..063fd10dcfc 100644 --- a/core/src/main/java/org/elasticsearch/client/Requests.java +++ b/core/src/main/java/org/elasticsearch/client/Requests.java @@ -50,6 +50,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -131,7 +132,7 @@ public class Requests { public static SuggestRequest suggestRequest(String... indices) { return new SuggestRequest(indices); } - + /** * Creates a search request against one or more indices. Note, the search source must be set either using the * actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}. @@ -265,6 +266,17 @@ public class Requests { return new FlushRequest(indices); } + /** + * Creates a synced flush indices request. + * + * @param indices The indices to sync flush. Use null or _all to execute against all indices + * @return The synced flush request + * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) + */ + public static SyncedFlushRequest syncedFlushRequest(String... 
indices) { + return new SyncedFlushRequest(indices); + } + /** * Creates a force merge request. * diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 1b5e8539ac6..ea57901f2b3 100644 --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -188,6 +188,10 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; @@ -1315,6 +1319,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client return new FlushRequestBuilder(this, FlushAction.INSTANCE).setIndices(indices); } + @Override + public ActionFuture syncedFlush(SyncedFlushRequest request) { + return execute(SyncedFlushAction.INSTANCE, request); + } + + @Override + public void syncedFlush(SyncedFlushRequest request, ActionListener listener) { + execute(SyncedFlushAction.INSTANCE, request, listener); + } + + @Override + public SyncedFlushRequestBuilder prepareSyncedFlush(String... 
indices) { + return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices); + } + @Override public void getMappings(GetMappingsRequest request, ActionListener listener) { execute(GetMappingsAction.INSTANCE, request, listener); diff --git a/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java b/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java index f7ae5f94b96..220ce9120e9 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java +++ b/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java @@ -19,8 +19,12 @@ package org.elasticsearch.indices.flush; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.index.shard.ShardId; +import java.io.IOException; import java.util.HashMap; import java.util.Map; @@ -30,15 +34,15 @@ import static java.util.Collections.unmodifiableMap; /** * Result for all copies of a shard */ -public class ShardsSyncedFlushResult { +public class ShardsSyncedFlushResult implements Streamable { private String failureReason; - private Map shardResponses; + private Map shardResponses; private String syncId; private ShardId shardId; // some shards may be unassigned, so we need this as state private int totalShards; - public ShardsSyncedFlushResult() { + private ShardsSyncedFlushResult() { } public ShardId getShardId() { @@ -59,7 +63,7 @@ public class ShardsSyncedFlushResult { /** * success constructor */ - public ShardsSyncedFlushResult(ShardId shardId, String syncId, int totalShards, Map shardResponses) { + public ShardsSyncedFlushResult(ShardId shardId, String syncId, int totalShards, Map shardResponses) { this.failureReason = null; this.shardResponses = unmodifiableMap(new HashMap<>(shardResponses)); this.syncId = 
syncId; @@ -98,7 +102,7 @@ public class ShardsSyncedFlushResult { */ public int successfulShards() { int i = 0; - for (SyncedFlushService.SyncedFlushResponse result : shardResponses.values()) { + for (SyncedFlushService.ShardSyncedFlushResponse result : shardResponses.values()) { if (result.success()) { i++; } @@ -109,9 +113,9 @@ public class ShardsSyncedFlushResult { /** * @return an array of shard failures */ - public Map failedShards() { - Map failures = new HashMap<>(); - for (Map.Entry result : shardResponses.entrySet()) { + public Map failedShards() { + Map failures = new HashMap<>(); + for (Map.Entry result : shardResponses.entrySet()) { if (result.getValue().success() == false) { failures.put(result.getKey(), result.getValue()); } @@ -123,11 +127,45 @@ public class ShardsSyncedFlushResult { * @return Individual responses for each shard copy with a detailed failure message if the copy failed to perform the synced flush. * Empty if synced flush failed before step three. */ - public Map shardResponses() { + public Map shardResponses() { return shardResponses; } public ShardId shardId() { return shardId; } + + @Override + public void readFrom(StreamInput in) throws IOException { + failureReason = in.readOptionalString(); + int numResponses = in.readInt(); + shardResponses = new HashMap<>(); + for (int i = 0; i < numResponses; i++) { + ShardRouting shardRouting = ShardRouting.readShardRoutingEntry(in); + SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in); + shardResponses.put(shardRouting, response); + } + syncId = in.readOptionalString(); + shardId = ShardId.readShardId(in); + totalShards = in.readInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(failureReason); + out.writeInt(shardResponses.size()); + for (Map.Entry entry : shardResponses.entrySet()) { + entry.getKey().writeTo(out); + entry.getValue().writeTo(out); + } + 
out.writeOptionalString(syncId); + shardId.writeTo(out); + out.writeInt(totalShards); + } + + public static ShardsSyncedFlushResult readShardsSyncedFlushResult(StreamInput in) throws IOException { + ShardsSyncedFlushResult shardsSyncedFlushResult = new ShardsSyncedFlushResult(); + shardsSyncedFlushResult.readFrom(in); + return shardsSyncedFlushResult; + } } diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index ad264c2ac05..0918ad2afee 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.flush; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; @@ -81,9 +82,8 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL this.clusterService = clusterService; this.transportService = transportService; this.indexNameExpressionResolver = indexNameExpressionResolver; - - transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler()); - transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, SyncedFlushRequest::new, ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler()); + transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreShardSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler()); + transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, ShardSyncedFlushRequest::new, 
ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler()); transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, InFlightOpsRequest::new, ThreadPool.Names.SAME, new InFlightOpCountTransportHandler()); } @@ -109,7 +109,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL * a utility method to perform a synced flush for all shards of multiple indices. see {@link #attemptSyncedFlush(ShardId, ActionListener)} * for more details. */ - public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener listener) { + public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener listener) { final ClusterState state = clusterService.state(); final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); final Map> results = ConcurrentCollections.newConcurrentMap(); @@ -123,7 +123,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } if (numberOfShards == 0) { - listener.onResponse(new IndicesSyncedFlushResult(results)); + listener.onResponse(new SyncedFlushResponse(results)); return; } final int finalTotalNumberOfShards = totalNumberOfShards; @@ -138,7 +138,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { results.get(index).add(syncedFlushResult); if (countDown.countDown()) { - listener.onResponse(new IndicesSyncedFlushResult(results)); + listener.onResponse(new SyncedFlushResponse(results)); } } @@ -147,7 +147,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL logger.debug("{} unexpected error while executing synced flush", shardId); results.get(index).add(new ShardsSyncedFlushResult(shardId, finalTotalNumberOfShards, e.getMessage())); if (countDown.countDown()) { - listener.onResponse(new 
IndicesSyncedFlushResult(results)); + listener.onResponse(new SyncedFlushResponse(results)); } } }); @@ -297,33 +297,33 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL void sendSyncRequests(final String syncId, final List shards, ClusterState state, Map expectedCommitIds, final ShardId shardId, final int totalShards, final ActionListener listener) { final CountDown countDown = new CountDown(shards.size()); - final Map results = ConcurrentCollections.newConcurrentMap(); + final Map results = ConcurrentCollections.newConcurrentMap(); for (final ShardRouting shard : shards) { final DiscoveryNode node = state.nodes().get(shard.currentNodeId()); if (node == null) { logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); - results.put(shard, new SyncedFlushResponse("unknown node")); + results.put(shard, new ShardSyncedFlushResponse("unknown node")); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); continue; } final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId()); if (expectedCommitId == null) { logger.trace("{} can't resolve expected commit id for {}, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); - results.put(shard, new SyncedFlushResponse("no commit id from pre-sync flush")); + results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush")); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); continue; } logger.trace("{} sending synced flush request to {}. 
sync id [{}].", shardId, shard, syncId); - transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new SyncedFlushRequest(shard.shardId(), syncId, expectedCommitId), - new BaseTransportResponseHandler() { + transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new ShardSyncedFlushRequest(shard.shardId(), syncId, expectedCommitId), + new BaseTransportResponseHandler() { @Override - public SyncedFlushResponse newInstance() { - return new SyncedFlushResponse(); + public ShardSyncedFlushResponse newInstance() { + return new ShardSyncedFlushResponse(); } @Override - public void handleResponse(SyncedFlushResponse response) { - SyncedFlushResponse existing = results.put(shard, response); + public void handleResponse(ShardSyncedFlushResponse response) { + ShardSyncedFlushResponse existing = results.put(shard, response); assert existing == null : "got two answers for node [" + node + "]"; // count after the assert so we won't decrement twice in handleException contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); @@ -332,7 +332,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public void handleException(TransportException exp) { logger.trace("{} error while performing synced flush on [{}], skipping", exp, shardId, shard); - results.put(shard, new SyncedFlushResponse(exp.getMessage())); + results.put(shard, new ShardSyncedFlushResponse(exp.getMessage())); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); } @@ -346,7 +346,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } private void contDownAndSendResponseIfDone(String syncId, List shards, ShardId shardId, int totalShards, - ActionListener listener, CountDown countDown, Map results) { + ActionListener listener, CountDown countDown, Map results) { if (countDown.countDown()) { assert results.size() == shards.size(); listener.onResponse(new 
ShardsSyncedFlushResult(shardId, syncId, totalShards, results)); @@ -369,7 +369,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } continue; } - transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreSyncedFlushRequest(shard.shardId()), new BaseTransportResponseHandler() { + transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()), new BaseTransportResponseHandler() { @Override public PreSyncedFlushResponse newInstance() { return new PreSyncedFlushResponse(); @@ -401,7 +401,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } } - private PreSyncedFlushResponse performPreSyncedFlush(PreSyncedFlushRequest request) { + private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) { IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true); logger.trace("{} performing pre sync flush", request.shardId()); @@ -410,7 +410,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL return new PreSyncedFlushResponse(commitId); } - private SyncedFlushResponse performSyncedFlush(SyncedFlushRequest request) { + private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.getShard(request.shardId().id()); logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", request.shardId(), request.syncId(), request.expectedCommitId()); @@ -418,11 +418,11 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL logger.trace("{} sync flush done. 
sync id [{}], result [{}]", request.shardId(), request.syncId(), result); switch (result) { case SUCCESS: - return new SyncedFlushResponse(); + return new ShardSyncedFlushResponse(); case COMMIT_MISMATCH: - return new SyncedFlushResponse("commit has changed"); + return new ShardSyncedFlushResponse("commit has changed"); case PENDING_OPERATIONS: - return new SyncedFlushResponse("pending operations"); + return new ShardSyncedFlushResponse("pending operations"); default: throw new ElasticsearchException("unknown synced flush result [" + result + "]"); } @@ -439,19 +439,19 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL return new InFlightOpsResponse(opCount); } - public final static class PreSyncedFlushRequest extends TransportRequest { + public final static class PreShardSyncedFlushRequest extends TransportRequest { private ShardId shardId; - public PreSyncedFlushRequest() { + public PreShardSyncedFlushRequest() { } - public PreSyncedFlushRequest(ShardId shardId) { + public PreShardSyncedFlushRequest(ShardId shardId) { this.shardId = shardId; } @Override public String toString() { - return "PreSyncedFlushRequest{" + + return "PreShardSyncedFlushRequest{" + "shardId=" + shardId + '}'; } @@ -504,16 +504,16 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } } - public static final class SyncedFlushRequest extends TransportRequest { + public static final class ShardSyncedFlushRequest extends TransportRequest { private String syncId; private Engine.CommitId expectedCommitId; private ShardId shardId; - public SyncedFlushRequest() { + public ShardSyncedFlushRequest() { } - public SyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) { + public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) { this.expectedCommitId = expectedCommitId; this.shardId = shardId; this.syncId = syncId; @@ -549,7 +549,7 @@ public class SyncedFlushService 
extends AbstractComponent implements IndexEventL @Override public String toString() { - return "SyncedFlushRequest{" + + return "ShardSyncedFlushRequest{" + "shardId=" + shardId + ",syncId='" + syncId + '\'' + '}'; @@ -559,18 +559,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL /** * Response for third step of synced flush (writing the sync id) for one shard copy */ - public static final class SyncedFlushResponse extends TransportResponse { + public static final class ShardSyncedFlushResponse extends TransportResponse { /** * a non null value indicates a failure to sync flush. null means success */ String failureReason; - public SyncedFlushResponse() { + public ShardSyncedFlushResponse() { failureReason = null; } - public SyncedFlushResponse(String failureReason) { + public ShardSyncedFlushResponse(String failureReason) { this.failureReason = failureReason; } @@ -596,11 +596,17 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public String toString() { - return "SyncedFlushResponse{" + + return "ShardSyncedFlushResponse{" + "success=" + success() + ", failureReason='" + failureReason + '\'' + '}'; } + + public static ShardSyncedFlushResponse readSyncedFlushResponse(StreamInput in) throws IOException { + ShardSyncedFlushResponse shardSyncedFlushResponse = new ShardSyncedFlushResponse(); + shardSyncedFlushResponse.readFrom(in); + return shardSyncedFlushResponse; + } } @@ -677,18 +683,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } } - private final class PreSyncedFlushTransportHandler implements TransportRequestHandler { + private final class PreSyncedFlushTransportHandler implements TransportRequestHandler { @Override - public void messageReceived(PreSyncedFlushRequest request, TransportChannel channel) throws Exception { + public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel) throws Exception { 
channel.sendResponse(performPreSyncedFlush(request)); } } - private final class SyncedFlushTransportHandler implements TransportRequestHandler { + private final class SyncedFlushTransportHandler implements TransportRequestHandler { @Override - public void messageReceived(SyncedFlushRequest request, TransportChannel channel) throws Exception { + public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel) throws Exception { channel.sendResponse(performSyncedFlush(request)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java index 9a3f844abb1..0b8ffcf94da 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java @@ -19,14 +19,14 @@ package org.elasticsearch.rest.action.admin.indices.flush; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.indices.flush.IndicesSyncedFlushResult; -import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.support.RestBuilderListener; @@ -38,12 +38,9 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; */ public class RestSyncedFlushAction extends BaseRestHandler { - private final SyncedFlushService syncedFlushService; - @Inject - public RestSyncedFlushAction(Settings settings, RestController controller, Client 
client, SyncedFlushService syncedFlushService) { + public RestSyncedFlushAction(Settings settings, RestController controller, Client client) { super(settings, controller, client); - this.syncedFlushService = syncedFlushService; controller.registerHandler(POST, "/_flush/synced", this); controller.registerHandler(POST, "/{index}/_flush/synced", this); @@ -53,12 +50,12 @@ public class RestSyncedFlushAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { - String[] indices = Strings.splitStringByCommaToArray(request.param("index")); IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen()); - - syncedFlushService.attemptSyncedFlush(indices, indicesOptions, new RestBuilderListener(channel) { + SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); + syncedFlushRequest.indicesOptions(indicesOptions); + client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener(channel) { @Override - public RestResponse buildResponse(IndicesSyncedFlushResult results, XContentBuilder builder) throws Exception { + public RestResponse buildResponse(SyncedFlushResponse results, XContentBuilder builder) throws Exception { builder.startObject(); results.toXContent(builder, request); builder.endObject(); diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java similarity index 51% rename from core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java rename to core/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java index 19cce93c6e4..dfc6ea67c49 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java +++ 
b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java @@ -17,16 +17,20 @@ * under the License. */ -package org.elasticsearch.indices.flush; +package org.elasticsearch.action.admin.indices.flush; import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.ObjectIntMap; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse.ShardCounts; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.flush.IndicesSyncedFlushResult.ShardCounts; -import org.elasticsearch.indices.flush.SyncedFlushService.SyncedFlushResponse; +import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; +import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; @@ -42,14 +46,11 @@ import static org.hamcrest.Matchers.hasSize; public class SyncedFlushUnitTests extends ESTestCase { - private static class TestPlan { - public ShardCounts totalCounts; - public Map countsPerIndex = new HashMap<>(); + public SyncedFlushResponse.ShardCounts totalCounts; + public Map countsPerIndex = new HashMap<>(); public ObjectIntMap expectedFailuresPerIndex = new ObjectIntHashMap<>(); - - public IndicesSyncedFlushResult result; - + public SyncedFlushResponse result; } public void testIndicesSyncedFlushResult() throws IOException { @@ -76,6 +77,56 @@ public class SyncedFlushUnitTests extends ESTestCase { } } + public void testResponseStreaming() throws IOException { + final TestPlan testPlan = createTestPlan(); + assertThat(testPlan.result.totalShards(), 
equalTo(testPlan.totalCounts.total)); + assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful)); + assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed)); + assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK)); + BytesStreamOutput out = new BytesStreamOutput(); + testPlan.result.writeTo(out); + StreamInput in = StreamInput.wrap(out.bytes()); + SyncedFlushResponse readResponse = new SyncedFlushResponse(); + readResponse.readFrom(in); + assertThat(readResponse.totalShards(), equalTo(testPlan.totalCounts.total)); + assertThat(readResponse.successfulShards(), equalTo(testPlan.totalCounts.successful)); + assertThat(readResponse.failedShards(), equalTo(testPlan.totalCounts.failed)); + assertThat(readResponse.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK)); + assertThat(readResponse.shardsResultPerIndex.size(), equalTo(testPlan.result.getShardsResultPerIndex().size())); + for (Map.Entry> entry : readResponse.getShardsResultPerIndex().entrySet()) { + List originalShardsResults = testPlan.result.getShardsResultPerIndex().get(entry.getKey()); + assertNotNull(originalShardsResults); + List readShardsResults = entry.getValue(); + assertThat(readShardsResults.size(), equalTo(originalShardsResults.size())); + for (int i = 0; i < readShardsResults.size(); i++) { + ShardsSyncedFlushResult originalShardResult = originalShardsResults.get(i); + ShardsSyncedFlushResult readShardResult = readShardsResults.get(i); + assertThat(originalShardResult.failureReason(), equalTo(readShardResult.failureReason())); + assertThat(originalShardResult.failed(), equalTo(readShardResult.failed())); + assertThat(originalShardResult.getShardId(), equalTo(readShardResult.getShardId())); + assertThat(originalShardResult.successfulShards(), equalTo(readShardResult.successfulShards())); + assertThat(originalShardResult.syncId(), 
equalTo(readShardResult.syncId())); + assertThat(originalShardResult.totalShards(), equalTo(readShardResult.totalShards())); + assertThat(originalShardResult.failedShards().size(), equalTo(readShardResult.failedShards().size())); + for (Map.Entry shardEntry : originalShardResult.failedShards().entrySet()) { + SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.failedShards().get(shardEntry.getKey()); + assertNotNull(readShardResponse); + SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); + assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); + assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); + } + assertThat(originalShardResult.shardResponses().size(), equalTo(readShardResult.shardResponses().size())); + for (Map.Entry shardEntry : originalShardResult.shardResponses().entrySet()) { + SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses().get(shardEntry.getKey()); + assertNotNull(readShardResponse); + SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); + assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); + assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); + } + } + } + } + private void assertShardCount(String name, Map header, ShardCounts expectedCounts) { assertThat(name + " has unexpected total count", (Integer) header.get("total"), equalTo(expectedCounts.total)); assertThat(name + " has unexpected successful count", (Integer) header.get("successful"), equalTo(expectedCounts.successful)); @@ -105,32 +156,33 @@ public class SyncedFlushUnitTests extends ESTestCase { failures++; shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure")); } else { - Map shardResponses = new HashMap<>(); + Map shardResponses = new HashMap<>(); for (int 
copy = 0; copy < replicas + 1; copy++) { final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy, null, - copy == 0, ShardRoutingState.STARTED, 0); + copy == 0, ShardRoutingState.STARTED, 0); if (randomInt(5) < 2) { // shard copy failure failed++; failures++; - shardResponses.put(shardRouting, new SyncedFlushResponse("copy failure " + shardId)); + shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId)); } else { successful++; - shardResponses.put(shardRouting, new SyncedFlushResponse()); + shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse()); } } shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); } } indicesResults.put(index, shardsResults); - testPlan.countsPerIndex.put(index, new ShardCounts(shards * (replicas + 1), successful, failed)); + testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed)); testPlan.expectedFailuresPerIndex.put(index, failures); totalFailed += failed; totalShards += shards * (replicas + 1); totalSuccesful += successful; } - testPlan.result = new IndicesSyncedFlushResult(indicesResults); - testPlan.totalCounts = new ShardCounts(totalShards, totalSuccesful, totalFailed); + testPlan.result = new SyncedFlushResponse(indicesResults); + testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccesful, totalFailed); return testPlan; } + } diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 01c76b465a9..2bfa885fa79 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -369,7 +369,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { 
ensureGreen(); } else { logger.info("--> trying to sync flush"); - assertEquals(SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test").failedShards(), 0); + assertEquals(client().admin().indices().prepareSyncedFlush("test").get().failedShards(), 0); assertSyncIdsNotNull(); } diff --git a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java new file mode 100644 index 00000000000..5e16fcebe86 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java @@ -0,0 +1,161 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gateway; + +import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.indices.flush.SyncedFlushUtil; +import org.elasticsearch.indices.recovery.RecoveryState; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.elasticsearch.test.ESIntegTestCase.client; +import static org.elasticsearch.test.ESIntegTestCase.internalCluster; +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; + +/** + * Test of file reuse on recovery shared between integration tests and backwards + * compatibility tests. + */ +public class ReusePeerRecoverySharedTest { + /** + * Test peer reuse on recovery. This is shared between RecoverFromGatewayIT + * and RecoveryBackwardsCompatibilityIT. + * + * @param indexSettings + * settings for the index to test + * @param restartCluster + * runnable that will restart the cluster under test + * @param logger + * logger for logging + * @param useSyncIds + * should this use synced flush? 
can't use synced from in the bwc + * tests + */ + public static void testCase(Settings indexSettings, Runnable restartCluster, ESLogger logger, boolean useSyncIds) { + /* + * prevent any rebalance actions during the peer recovery if we run into + * a relocation the reuse count will be 0 and this fails the test. We + * are testing here if we reuse the files on disk after full restarts + * for replicas. + */ + assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put(indexSettings) + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE))); + client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + logger.info("--> indexing docs"); + for (int i = 0; i < 1000; i++) { + client().prepareIndex("test", "type").setSource("field", "value").execute().actionGet(); + if ((i % 200) == 0) { + client().admin().indices().prepareFlush().execute().actionGet(); + } + } + if (randomBoolean()) { + client().admin().indices().prepareFlush().execute().actionGet(); + } + logger.info("--> running cluster health"); + client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + // just wait for merges + client().admin().indices().prepareForceMerge("test").setMaxNumSegments(100).get(); + client().admin().indices().prepareFlush().setWaitIfOngoing(true).setForce(true).get(); + + if (useSyncIds == false) { + logger.info("--> disabling allocation while the cluster is shut down"); + + // Disable allocations while we are closing nodes + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE)).get(); + logger.info("--> full cluster restart"); + restartCluster.run(); + + logger.info("--> waiting for cluster to return to green after first shutdown"); + 
client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + } else { + logger.info("--> trying to sync flush"); + assertEquals(client().admin().indices().prepareSyncedFlush("test").get().failedShards(), 0); + assertSyncIdsNotNull(); + } + + logger.info("--> disabling allocation while the cluster is shut down", useSyncIds ? "" : " a second time"); + // Disable allocations while we are closing nodes + client().admin().cluster().prepareUpdateSettings().setTransientSettings( + settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE)) + .get(); + logger.info("--> full cluster restart"); + restartCluster.run(); + + logger.info("--> waiting for cluster to return to green after {}shutdown", useSyncIds ? "" : "second "); + client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + + if (useSyncIds) { + assertSyncIdsNotNull(); + } + RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get(); + for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { + long recovered = 0; + for (RecoveryState.File file : recoveryState.getIndex().fileDetails()) { + if (file.name().startsWith("segments")) { + recovered += file.length(); + } + } + if (!recoveryState.getPrimary() && (useSyncIds == false)) { + logger.info("--> replica shard {} recovered from {} to {}, recovered {}, reuse {}", recoveryState.getShardId().getId(), + recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(), + recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes()); + assertThat("no bytes should be recovered", recoveryState.getIndex().recoveredBytes(), equalTo(recovered)); + assertThat("data should have been reused", recoveryState.getIndex().reusedBytes(), greaterThan(0l)); + // we have to recover the segments file since we commit the translog ID on engine startup + 
assertThat("all bytes should be reused except of the segments file", recoveryState.getIndex().reusedBytes(), + equalTo(recoveryState.getIndex().totalBytes() - recovered)); + assertThat("no files should be recovered except of the segments file", recoveryState.getIndex().recoveredFileCount(), + equalTo(1)); + assertThat("all files should be reused except of the segments file", recoveryState.getIndex().reusedFileCount(), + equalTo(recoveryState.getIndex().totalFileCount() - 1)); + assertThat("> 0 files should be reused", recoveryState.getIndex().reusedFileCount(), greaterThan(0)); + } else { + if (useSyncIds && !recoveryState.getPrimary()) { + logger.info("--> replica shard {} recovered from {} to {} using sync id, recovered {}, reuse {}", + recoveryState.getShardId().getId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(), + recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes()); + } + assertThat(recoveryState.getIndex().recoveredBytes(), equalTo(0l)); + assertThat(recoveryState.getIndex().reusedBytes(), equalTo(recoveryState.getIndex().totalBytes())); + assertThat(recoveryState.getIndex().recoveredFileCount(), equalTo(0)); + assertThat(recoveryState.getIndex().reusedFileCount(), equalTo(recoveryState.getIndex().totalFileCount())); + } + } + } + + public static void assertSyncIdsNotNull() { + IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); + for (ShardStats shardStats : indexStats.getShards()) { + assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index aa8c9f18c01..9ff0df4d390 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -22,6 +22,7 @@ import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; @@ -99,7 +100,7 @@ public class FlushIT extends ESIntegTestCase { result = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), new ShardId("test", 0)); } else { logger.info("--> sync flushing index [test]"); - IndicesSyncedFlushResult indicesResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test"); + SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get(); result = indicesResult.getShardsResultPerIndex().get("test").get(0); } assertFalse(result.failed()); @@ -171,7 +172,7 @@ public class FlushIT extends ESIntegTestCase { assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); } logger.info("--> trying sync flush"); - IndicesSyncedFlushResult syncedFlushResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test"); + SyncedFlushResponse syncedFlushResult = client().admin().indices().prepareSyncedFlush("test").get(); logger.info("--> sync flush done"); stop.set(true); indexingThread.join(); @@ -191,7 +192,7 @@ public class FlushIT extends ESIntegTestCase { for (final ShardStats shardStats : shardsStats) { for (final ShardsSyncedFlushResult shardResult : syncedFlushResults) { if (shardStats.getShardRouting().getId() == shardResult.shardId().getId()) { - for (Map.Entry singleResponse : shardResult.shardResponses().entrySet()) { + for (Map.Entry singleResponse : shardResult.shardResponses().entrySet()) { if (singleResponse.getKey().currentNodeId().equals(shardStats.getShardRouting().currentNodeId())) { if 
(singleResponse.getValue().success()) { logger.info("{} sync flushed on node {}", singleResponse.getKey().shardId(), singleResponse.getKey().currentNodeId()); @@ -212,7 +213,7 @@ public class FlushIT extends ESIntegTestCase { prepareCreate("test").setSettings(Settings.builder().put("index.routing.allocation.include._name", "nonexistent")).get(); // this should not hang but instead immediately return with empty result set - List shardsResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test").getShardsResultPerIndex().get("test"); + List shardsResult = client().admin().indices().prepareSyncedFlush("test").get().getShardsResultPerIndex().get("test"); // just to make sure the test actually tests the right thing int numShards = client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1); assertThat(shardsResult.size(), equalTo(numShards)); diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index 1a4bf8fd3f7..e4c9cb8a7ef 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -98,7 +98,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { assertNotNull(syncedFlushResult); assertEquals(1, syncedFlushResult.successfulShards()); assertEquals(1, syncedFlushResult.totalShards()); - SyncedFlushService.SyncedFlushResponse response = syncedFlushResult.shardResponses().values().iterator().next(); + SyncedFlushService.ShardSyncedFlushResponse response = syncedFlushResult.shardResponses().values().iterator().next(); assertTrue(response.success()); } @@ -157,7 +157,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { assertNull(listener.result); assertEquals("no such index", 
listener.error.getMessage()); } - + public void testFailAfterIntermediateCommit() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java index fef6c23231e..fdabaf6b5a8 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java @@ -20,6 +20,7 @@ package org.elasticsearch.indices.flush; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.ShardRouting; @@ -31,6 +32,9 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; +import static org.elasticsearch.test.ESIntegTestCase.client; +import static org.elasticsearch.test.ESTestCase.randomBoolean; + /** Utils for SyncedFlush */ public class SyncedFlushUtil { @@ -38,25 +42,6 @@ public class SyncedFlushUtil { } - /** - * Blocking single index version of {@link SyncedFlushService#attemptSyncedFlush(String[], IndicesOptions, ActionListener)} - */ - public static IndicesSyncedFlushResult attemptSyncedFlush(InternalTestCluster cluster, String index) { - SyncedFlushService service = cluster.getInstance(SyncedFlushService.class); - LatchedListener listener = new LatchedListener(); - service.attemptSyncedFlush(new String[]{index}, IndicesOptions.lenientExpandOpen(), listener); - try { - listener.latch.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - if (listener.error != null) { - throw ExceptionsHelper.convertToElastic(listener.error); - } - return listener.result; - } - - /** * 
Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)} */ @@ -109,5 +94,4 @@ public class SyncedFlushUtil { } return listener.result; } - } diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 37a8fd388b8..ba1c16d61ab 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -20,15 +20,12 @@ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.RandomizedTest; -import com.carrotsearch.randomizedtesting.Randomness; import com.carrotsearch.randomizedtesting.annotations.TestGroup; import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.http.impl.client.HttpClients; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.SuppressForbidden; import org.apache.lucene.util.TestUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -36,7 +33,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -44,8 +40,9 @@ import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import 
org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; @@ -63,6 +60,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -137,9 +135,7 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.MalformedURLException; import java.net.URL; -import java.net.UnknownHostException; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; @@ -162,6 +158,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BooleanSupplier; +import static org.elasticsearch.client.Requests.syncedFlushRequest; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static 
org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -1499,13 +1496,13 @@ public abstract class ESIntegTestCase extends ESTestCase { if (randomBoolean()) { client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute( new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); - } else if (isInternalCluster()) { - internalCluster().getInstance(SyncedFlushService.class).attemptSyncedFlush(indices, IndicesOptions.lenientExpandOpen(), - new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); + } else { + client().admin().indices().syncedFlush(syncedFlushRequest(indices).indicesOptions(IndicesOptions.lenientExpandOpen()), + new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } } else if (rarely()) { client().admin().indices().prepareForceMerge(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).setMaxNumSegments(between(1, 10)).setFlush(maybeFlush && randomBoolean()).execute( - new LatchedActionListener(newLatch(inFlightAsyncOperations))); + new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } } while (inFlightAsyncOperations.size() > MAX_IN_FLIGHT_ASYNC_INDEXES) { From 88a010468731e354f3b95c2b44526227dab64940 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 16 Dec 2015 17:36:14 +0100 Subject: [PATCH 095/167] [TEST] ScriptServiceTests only test single ScriptEngineService --- .../script/ScriptServiceTests.java | 87 +++++++++---------- 1 file changed, 41 insertions(+), 46 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 5abdc4f79bb..e81b0894eca 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -33,6 +33,7 @@ import org.junit.Before; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import 
java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -48,7 +49,7 @@ import static org.hamcrest.Matchers.sameInstance; public class ScriptServiceTests extends ESTestCase { private ResourceWatcherService resourceWatcherService; - private Set scriptEngineServices; + private ScriptEngineService scriptEngineService; private Map scriptEnginesByLangMap; private ScriptContextRegistry scriptContextRegistry; private ScriptContext[] scriptContexts; @@ -72,8 +73,8 @@ public class ScriptServiceTests extends ESTestCase { .put("path.conf", genericConfigFolder) .build(); resourceWatcherService = new ResourceWatcherService(baseSettings, null); - scriptEngineServices = newHashSet(new TestEngineService()); - scriptEnginesByLangMap = ScriptModesTests.buildScriptEnginesByLangMap(scriptEngineServices); + scriptEngineService = new TestEngineService(); + scriptEnginesByLangMap = ScriptModesTests.buildScriptEnginesByLangMap(Collections.singleton(scriptEngineService)); //randomly register custom script contexts int randomInt = randomIntBetween(0, 3); //prevent duplicates using map @@ -100,7 +101,7 @@ public class ScriptServiceTests extends ESTestCase { private void buildScriptService(Settings additionalSettings) throws IOException { Settings finalSettings = Settings.builder().put(baseSettings).put(additionalSettings).build(); Environment environment = new Environment(finalSettings); - scriptService = new ScriptService(finalSettings, environment, scriptEngineServices, resourceWatcherService, scriptContextRegistry) { + scriptService = new ScriptService(finalSettings, environment, Collections.singleton(scriptEngineService), resourceWatcherService, scriptContextRegistry) { @Override String getScriptFromIndex(String scriptLang, String id, HasContextAndHeaders headersContext) { //mock the script that gets retrieved from an index @@ -230,8 +231,6 @@ public class ScriptServiceTests extends ESTestCase { for (int i = 0; i < numEngineSettings; i++) { String 
settingKey; do { - ScriptEngineService[] scriptEngineServices = this.scriptEngineServices.toArray(new ScriptEngineService[this.scriptEngineServices.size()]); - ScriptEngineService scriptEngineService = randomFrom(scriptEngineServices); ScriptType scriptType = randomFrom(ScriptType.values()); ScriptContext scriptContext = randomFrom(this.scriptContexts); settingKey = scriptEngineService.types()[0] + "." + scriptType + "." + scriptContext.getKey(); @@ -288,40 +287,38 @@ public class ScriptServiceTests extends ESTestCase { buildScriptService(builder.build()); createFileScripts("groovy", "expression", "mustache", "test"); - for (ScriptEngineService scriptEngineService : scriptEngineServices) { - for (ScriptType scriptType : ScriptType.values()) { - //make sure file scripts have a different name than inline ones. - //Otherwise they are always considered file ones as they can be found in the static cache. - String script = scriptType == ScriptType.FILE ? "file_script" : "script"; - for (ScriptContext scriptContext : this.scriptContexts) { - //fallback mechanism: 1) engine specific settings 2) op based settings 3) source based settings - ScriptMode scriptMode = engineSettings.get(scriptEngineService.types()[0] + "." + scriptType + "." + scriptContext.getKey()); - if (scriptMode == null) { - scriptMode = scriptContextSettings.get(scriptContext.getKey()); - } - if (scriptMode == null) { - scriptMode = scriptSourceSettings.get(scriptType); - } - if (scriptMode == null) { - scriptMode = DEFAULT_SCRIPT_MODES.get(scriptType); - } + for (ScriptType scriptType : ScriptType.values()) { + //make sure file scripts have a different name than inline ones. + //Otherwise they are always considered file ones as they can be found in the static cache. + String script = scriptType == ScriptType.FILE ? 
"file_script" : "script"; + for (ScriptContext scriptContext : this.scriptContexts) { + //fallback mechanism: 1) engine specific settings 2) op based settings 3) source based settings + ScriptMode scriptMode = engineSettings.get(scriptEngineService.types()[0] + "." + scriptType + "." + scriptContext.getKey()); + if (scriptMode == null) { + scriptMode = scriptContextSettings.get(scriptContext.getKey()); + } + if (scriptMode == null) { + scriptMode = scriptSourceSettings.get(scriptType); + } + if (scriptMode == null) { + scriptMode = DEFAULT_SCRIPT_MODES.get(scriptType); + } - for (String lang : scriptEngineService.types()) { - switch (scriptMode) { - case ON: + for (String lang : scriptEngineService.types()) { + switch (scriptMode) { + case ON: + assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders); + break; + case OFF: + assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders); + break; + case SANDBOX: + if (scriptEngineService.sandboxed()) { assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders); - break; - case OFF: + } else { assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders); - break; - case SANDBOX: - if (scriptEngineService.sandboxed()) { - assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders); - } else { - assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders); - } - break; - } + } + break; } } } @@ -338,15 +335,13 @@ public class ScriptServiceTests extends ESTestCase { unknownContext = randomAsciiOfLength(randomIntBetween(1, 30)); } while(scriptContextRegistry.isSupportedContext(new ScriptContext.Plugin(pluginName, unknownContext))); - for (ScriptEngineService scriptEngineService : scriptEngineServices) { - for (String type : scriptEngineService.types()) { - try { - scriptService.compile(new Script("test", randomFrom(ScriptType.values()), type, null), new ScriptContext.Plugin( - pluginName, 
unknownContext), contextAndHeaders); - fail("script compilation should have been rejected"); - } catch(IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported")); - } + for (String type : scriptEngineService.types()) { + try { + scriptService.compile(new Script("test", randomFrom(ScriptType.values()), type, null), new ScriptContext.Plugin( + pluginName, unknownContext), contextAndHeaders); + fail("script compilation should have been rejected"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported")); } } } From 53b3cd83a573648e153f6548c2b8c085b864186f Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 24 Nov 2015 20:39:26 -0500 Subject: [PATCH 096/167] Tribe nodes should apply cluster state updates in batches This commit applies the general mechanism for applying cluster state updates in batches to tribe nodes. 
Relates #14899, relates #14725 --- .../org/elasticsearch/tribe/TribeService.java | 275 ++++++++++-------- 1 file changed, 157 insertions(+), 118 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index 87da13fad4a..62647ad3829 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -20,14 +20,14 @@ package org.elasticsearch.tribe; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -208,142 +209,180 @@ public class TribeService extends AbstractLifecycleComponent { } } - class TribeClusterStateListener implements ClusterStateListener { + class TribeClusterStateListener implements ClusterStateListener { private final String tribeName; + private final 
TribeNodeClusterStateTaskExecutor executor; TribeClusterStateListener(Node tribeNode) { - this.tribeName = tribeNode.settings().get(TRIBE_NAME); + String tribeName = tribeNode.settings().get(TRIBE_NAME); + this.tribeName = tribeName; + executor = new TribeNodeClusterStateTaskExecutor(tribeName); } @Override public void clusterChanged(final ClusterChangedEvent event) { logger.debug("[{}] received cluster event, [{}]", tribeName, event.source()); - clusterService.submitStateUpdateTask("cluster event from " + tribeName + ", " + event.source(), new ClusterStateUpdateTask() { - @Override - public boolean runOnlyOnMaster() { - return false; + clusterService.submitStateUpdateTask( + "cluster event from " + tribeName + ", " + event.source(), + event, + ClusterStateTaskConfig.build(Priority.NORMAL), + executor, + (source, t) -> logger.warn("failed to process [{}]", t, source)); + } + } + + class TribeNodeClusterStateTaskExecutor implements ClusterStateTaskExecutor { + private final String tribeName; + + TribeNodeClusterStateTaskExecutor(String tribeName) { + this.tribeName = tribeName; + } + + + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + ClusterState accumulator = ClusterState.builder(currentState).build(); + BatchResult.Builder builder = BatchResult.builder(); + + try { + // we only need to apply the latest cluster state update + accumulator = applyUpdate(accumulator, tasks.get(tasks.size() - 1)); + builder.successes(tasks); + } catch (Throwable t) { + builder.failures(tasks, t); + } + + return builder.build(accumulator); + } + + private ClusterState applyUpdate(ClusterState currentState, ClusterChangedEvent task) { + boolean clusterStateChanged = false; + ClusterState tribeState = task.state(); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes()); + // -- merge nodes + // go over existing nodes, and see if they need to be 
removed + for (DiscoveryNode discoNode : currentState.nodes()) { + String markedTribeName = discoNode.attributes().get(TRIBE_NAME); + if (markedTribeName != null && markedTribeName.equals(tribeName)) { + if (tribeState.nodes().get(discoNode.id()) == null) { + clusterStateChanged = true; + logger.info("[{}] removing node [{}]", tribeName, discoNode); + nodes.remove(discoNode.id()); + } } + } + // go over tribe nodes, and see if they need to be added + for (DiscoveryNode tribe : tribeState.nodes()) { + if (currentState.nodes().get(tribe.id()) == null) { + // a new node, add it, but also add the tribe name to the attributes + Map tribeAttr = new HashMap<>(); + for (ObjectObjectCursor attr : tribe.attributes()) { + tribeAttr.put(attr.key, attr.value); + } + tribeAttr.put(TRIBE_NAME, tribeName); + DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version()); + clusterStateChanged = true; + logger.info("[{}] adding node [{}]", tribeName, discoNode); + nodes.put(discoNode); + } + } - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - ClusterState tribeState = event.state(); - DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes()); - // -- merge nodes - // go over existing nodes, and see if they need to be removed - for (DiscoveryNode discoNode : currentState.nodes()) { - String markedTribeName = discoNode.attributes().get(TRIBE_NAME); - if (markedTribeName != null && markedTribeName.equals(tribeName)) { - if (tribeState.nodes().get(discoNode.id()) == null) { - logger.info("[{}] removing node [{}]", tribeName, discoNode); - nodes.remove(discoNode.id()); - } - } + // -- merge metadata + ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); + MetaData.Builder metaData = MetaData.builder(currentState.metaData()); + RoutingTable.Builder routingTable = 
RoutingTable.builder(currentState.routingTable()); + // go over existing indices, and see if they need to be removed + for (IndexMetaData index : currentState.metaData()) { + String markedTribeName = index.getSettings().get(TRIBE_NAME); + if (markedTribeName != null && markedTribeName.equals(tribeName)) { + IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex()); + clusterStateChanged = true; + if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) { + logger.info("[{}] removing index [{}]", tribeName, index.getIndex()); + removeIndex(blocks, metaData, routingTable, index); + } else { + // always make sure to update the metadata and routing table, in case + // there are changes in them (new mapping, shards moving from initializing to started) + routingTable.add(tribeState.routingTable().index(index.getIndex())); + Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); + metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); } - // go over tribe nodes, and see if they need to be added - for (DiscoveryNode tribe : tribeState.nodes()) { - if (currentState.nodes().get(tribe.id()) == null) { - // a new node, add it, but also add the tribe name to the attributes - Map tribeAttr = new HashMap<>(); - for (ObjectObjectCursor attr : tribe.attributes()) { - tribeAttr.put(attr.key, attr.value); - } - tribeAttr.put(TRIBE_NAME, tribeName); - DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version()); - logger.info("[{}] adding node [{}]", tribeName, discoNode); - nodes.put(discoNode); - } + } + } + // go over tribe one, and see if they need to be added + for (IndexMetaData tribeIndex : tribeState.metaData()) { + // if there is no routing table yet, do nothing with it... 
+ IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex()); + if (table == null) { + continue; + } + final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex()); + if (indexMetaData == null) { + if (!droppedIndices.contains(tribeIndex.getIndex())) { + // a new index, add it, and add the tribe name as a setting + clusterStateChanged = true; + logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex()); + addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex); } - - // -- merge metadata - ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - MetaData.Builder metaData = MetaData.builder(currentState.metaData()); - RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable()); - // go over existing indices, and see if they need to be removed - for (IndexMetaData index : currentState.metaData()) { - String markedTribeName = index.getSettings().get(TRIBE_NAME); - if (markedTribeName != null && markedTribeName.equals(tribeName)) { - IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex()); - if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) { - logger.info("[{}] removing index [{}]", tribeName, index.getIndex()); - removeIndex(blocks, metaData, routingTable, index); - } else { - // always make sure to update the metadata and routing table, in case - // there are changes in them (new mapping, shards moving from initializing to started) - routingTable.add(tribeState.routingTable().index(index.getIndex())); - Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); - metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); - } - } - } - // go over tribe one, and see if they need to be added - for (IndexMetaData tribeIndex : tribeState.metaData()) { - // if there is no routing table yet, do nothing with it... 
- IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex()); - if (table == null) { - continue; - } - final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex()); - if (indexMetaData == null) { - if (!droppedIndices.contains(tribeIndex.getIndex())) { - // a new index, add it, and add the tribe name as a setting - logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex()); + } else { + String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME); + if (!tribeName.equals(existingFromTribe)) { + // we have a potential conflict on index names, decide what to do... + if (ON_CONFLICT_ANY.equals(onConflict)) { + // we chose any tribe, carry on + } else if (ON_CONFLICT_DROP.equals(onConflict)) { + // drop the indices, there is a conflict + clusterStateChanged = true; + logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); + removeIndex(blocks, metaData, routingTable, tribeIndex); + droppedIndices.add(tribeIndex.getIndex()); + } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) { + // on conflict, prefer a tribe... + String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length()); + if (tribeName.equals(preferredTribeName)) { + // the new one is the preferred one, replace... + clusterStateChanged = true; + logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); + removeIndex(blocks, metaData, routingTable, tribeIndex); addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex); - } - } else { - String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME); - if (!tribeName.equals(existingFromTribe)) { - // we have a potential conflict on index names, decide what to do... 
- if (ON_CONFLICT_ANY.equals(onConflict)) { - // we chose any tribe, carry on - } else if (ON_CONFLICT_DROP.equals(onConflict)) { - // drop the indices, there is a conflict - logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); - removeIndex(blocks, metaData, routingTable, tribeIndex); - droppedIndices.add(tribeIndex.getIndex()); - } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) { - // on conflict, prefer a tribe... - String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length()); - if (tribeName.equals(preferredTribeName)) { - // the new one is hte preferred one, replace... - logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); - removeIndex(blocks, metaData, routingTable, tribeIndex); - addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex); - } // else: either the existing one is the preferred one, or we haven't seen one, carry on - } - } + } // else: either the existing one is the preferred one, or we haven't seen one, carry on } } - - return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build(); } + } - private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) { - metaData.remove(index.getIndex()); - routingTable.remove(index.getIndex()); - blocks.removeIndexBlocks(index.getIndex()); - } + if (!clusterStateChanged) { + return currentState; + } else { + return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build(); + } + } - private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) { - Settings tribeSettings = 
Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); - metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); - routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex())); - if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex())) { - blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK); - } - if (Regex.simpleMatch(blockIndicesRead, tribeIndex.getIndex())) { - blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_READ_BLOCK); - } - if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.getIndex())) { - blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK); - } - } + private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) { + metaData.remove(index.getIndex()); + routingTable.remove(index.getIndex()); + blocks.removeIndexBlocks(index.getIndex()); + } - @Override - public void onFailure(String source, Throwable t) { - logger.warn("failed to process [{}]", t, source); - } - }); + private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) { + Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); + metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); + routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex())); + if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex())) { + blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK); + } + if (Regex.simpleMatch(blockIndicesRead, tribeIndex.getIndex())) { + blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_READ_BLOCK); + } + if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.getIndex())) { + blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK); + } 
} } } From d9a24961c55b80d64a10f12ea1ad97d04d9d9e16 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 16 Dec 2015 12:49:41 -0500 Subject: [PATCH 097/167] Fix minor issues in delimited payload token filter docs This commit addresses a few minor issues in the delimited payload token filter docs: - the provided example reversed the payloads associated with the tokens "the" and "fox" - two additional typos in the same sentence - "per default" -> "by default" - "default int to" -> "default into" - adds two serial commas --- .../tokenfilters/delimited-payload-tokenfilter.asciidoc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc index 293b51a0331..b64f5edbeb9 100644 --- a/docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc @@ -3,9 +3,7 @@ Named `delimited_payload_filter`. Splits tokens into tokens and payload whenever a delimiter character is found. -Example: "the|1 quick|2 fox|3" is split per default int to tokens `fox`, `quick` and `the` with payloads `1`, `2` and `3` respectively. - - +Example: "the|1 quick|2 fox|3" is split by default into tokens `the`, `quick`, and `fox` with payloads `1`, `2`, and `3` respectively. 
Parameters: From 45b7afe14f4d8f300e808f1987fd74e3e61172df Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 16 Dec 2015 20:06:43 +0100 Subject: [PATCH 098/167] more tests --- .../common/settings/AbstractScopedSettings.java | 2 +- .../org/elasticsearch/common/settings/Setting.java | 2 +- .../cluster/settings/ClusterSettingsIT.java | 10 ++++++++++ .../common/settings/ScopedSettingsTests.java | 2 ++ 4 files changed, 14 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 1efc9d3fed6..13743cabcf6 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -43,7 +43,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { if (entry.getScope() != scope) { throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope()); } - if (entry.isGroupSetting()) { + if (entry.hasComplexMatcher()) { complexMatchers.put(entry.getKey(), entry); } else { keySettings.put(entry.getKey(), entry); diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 884479d7cb6..42f77a18c9c 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -92,7 +92,7 @@ public class Setting extends ToXContentToBytes { * rather than a single value. The key, see {@link #getKey()}, in contrast to non-group settings is a prefix like cluster.store. * that matches all settings with this prefix. 
*/ - public boolean isGroupSetting() { + boolean isGroupSetting() { return false; } diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index 5dccbb546ce..fb8a8e28b33 100644 --- a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -235,6 +235,16 @@ public class ClusterSettingsIT extends ESIntegTestCase { assertThat(response3.getPersistentSettings().get(key2), notNullValue()); } + public void testCanUpdateTracerSettings() { + ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putArray("transport.tracer.include", "internal:index/shard/recovery/*", + "internal:gateway/local*")) + .get(); + assertArrayEquals(clusterUpdateSettingsResponse.getTransientSettings().getAsArray("transport.tracer.include"), new String[] {"internal:index/shard/recovery/*", + "internal:gateway/local*"}); + } + public void testUpdateDiscoveryPublishTimeout() { DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class); diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index b54784dc16d..a4148594de6 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -151,5 +151,7 @@ public class ScopedSettingsTests extends ESTestCase { assertEquals(ref.get().size(), 2); assertTrue(ref.get().contains("internal:index/shard/recovery/*")); assertTrue(ref.get().contains("internal:gateway/local*")); + assertNotNull(settings.get("transport.tracer.include." 
+ randomIntBetween(1, 100))); + assertSame(TransportService.TRACE_LOG_INCLUDE_SETTING, settings.get("transport.tracer.include." + randomIntBetween(1, 100))); } } From 161cabbef41133e47ab5462d94e67e9aa1f91168 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 16 Dec 2015 20:14:00 +0100 Subject: [PATCH 099/167] organize tests --- .../common/settings/ScopedSettingsTests.java | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index a4148594de6..97393c51b8d 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -115,11 +115,20 @@ public class ScopedSettingsTests extends ESTestCase { public void testGet() { ClusterSettings settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + // group setting - complex matcher Setting setting = settings.get("cluster.routing.allocation.require.value"); assertEquals(setting, FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING); setting = settings.get("cluster.routing.allocation.total_shards_per_node"); assertEquals(setting, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING); + + // array settings - complex matcher + assertNotNull(settings.get("transport.tracer.include." + randomIntBetween(1, 100))); + assertSame(TransportService.TRACE_LOG_INCLUDE_SETTING, settings.get("transport.tracer.include." 
+ randomIntBetween(1, 100))); + + // array settings - complex matcher - only accepts numbers + assertNull(settings.get("transport.tracer.include.FOO")); } public void testIsDynamic(){ @@ -127,6 +136,10 @@ public class ScopedSettingsTests extends ESTestCase { assertFalse(settings.hasDynamicSetting("foo.bar.baz")); assertTrue(settings.hasDynamicSetting("foo.bar")); assertNotNull(settings.get("foo.bar.baz")); + settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + assertTrue(settings.hasDynamicSetting("transport.tracer.include." + randomIntBetween(1, 100))); + assertFalse(settings.hasDynamicSetting("transport.tracer.include.BOOM")); + assertTrue(settings.hasDynamicSetting("cluster.routing.allocation.require.value")); } public void testDiff() throws IOException { @@ -151,7 +164,5 @@ public class ScopedSettingsTests extends ESTestCase { assertEquals(ref.get().size(), 2); assertTrue(ref.get().contains("internal:index/shard/recovery/*")); assertTrue(ref.get().contains("internal:gateway/local*")); - assertNotNull(settings.get("transport.tracer.include." + randomIntBetween(1, 100))); - assertSame(TransportService.TRACE_LOG_INCLUDE_SETTING, settings.get("transport.tracer.include." + randomIntBetween(1, 100))); } } From 67e0b5190a155b58b790bafbf56aca721f3c0611 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 16 Dec 2015 11:56:55 -0800 Subject: [PATCH 100/167] Remove old docs gitignores and comment on docs build dir --- .gitignore | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index b7d5d95a256..add7a22e840 100644 --- a/.gitignore +++ b/.gitignore @@ -32,10 +32,10 @@ dependency-reduced-pom.xml # osx stuff .DS_Store -# random old stuff that we should look at the necessity of... -docs/html/ -docs/build.log +# needed in case docs build is run...maybe we can configure doc build to generate files under build? html_docs + +# random old stuff that we should look at the necessity of... 
/tmp/ backwards/ From ee79d465831e708a5d431aa5ff69aea4756bf799 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Wed, 16 Dec 2015 16:38:16 -0500 Subject: [PATCH 101/167] Add gradle thirdPartyAudit to precommit tasks --- build.gradle | 10 ++ .../gradle/precommit/PrecommitTasks.groovy | 3 +- .../precommit/ThirdPartyAuditTask.groovy | 162 ++++++++++++++++++ core/build.gradle | 8 + modules/lang-expression/build.gradle | 4 + modules/lang-groovy/build.gradle | 9 + plugins/discovery-azure/build.gradle | 11 ++ plugins/discovery-ec2/build.gradle | 9 + plugins/discovery-gce/build.gradle | 3 + plugins/lang-plan-a/build.gradle | 3 + plugins/lang-python/build.gradle | 37 ++++ plugins/mapper-attachments/build.gradle | 7 + plugins/repository-hdfs/build.gradle | 5 +- plugins/repository-s3/build.gradle | 9 + qa/evil-tests/build.gradle | 11 ++ test-framework/build.gradle | 2 + 16 files changed, 291 insertions(+), 2 deletions(-) create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy diff --git a/build.gradle b/build.gradle index 831db456a19..8a95fa90925 100644 --- a/build.gradle +++ b/build.gradle @@ -123,6 +123,16 @@ subprojects { } } } + // For reasons we don't fully understand yet, external dependencies are not picked up by Ant's optional tasks. + // But you can easily do it in another way. + // Only if your buildscript and Ant's optional task need the same library would you have to define it twice. + // https://docs.gradle.org/current/userguide/organizing_build_logic.html + configurations { + forbiddenApis + } + dependencies { + forbiddenApis 'de.thetaphi:forbiddenapis:2.0' + } } // Ensure similar tasks in dependent projects run first. 
The projectsEvaluated here is diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 04878d979e9..ef2a49cc444 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -34,7 +34,8 @@ class PrecommitTasks { List precommitTasks = [ configureForbiddenApis(project), project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), - project.tasks.create('jarHell', JarHellTask.class)] + project.tasks.create('jarHell', JarHellTask.class), + project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)] // tasks with just tests don't need dependency licenses, so this flag makes adding // the task optional diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy new file mode 100644 index 00000000000..b5ce35816a1 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.precommit + +import org.gradle.api.DefaultTask +import org.gradle.api.artifacts.UnknownConfigurationException +import org.gradle.api.file.FileCollection +import org.gradle.api.tasks.TaskAction + +import org.apache.tools.ant.BuildLogger +import org.apache.tools.ant.Project + +import org.slf4j.Logger +import org.slf4j.LoggerFactory + +/** + * Basic static checking to keep tabs on third party JARs + */ +public class ThirdPartyAuditTask extends DefaultTask { + + // true to be lenient about MISSING CLASSES + private boolean lenient; + + // patterns for classes to exclude, because we understand their issues + private String[] excludes = new String[0]; + + ThirdPartyAuditTask() { + dependsOn(project.configurations.testCompile) + description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors'" + } + + /** + * Set to true to be lenient with dependencies. By default this check will fail if it finds + * MISSING CLASSES. This means the set of jars is incomplete. However, in some cases + * this can be due to intentional exclusions that are well-tested and understood. + */ + public void setLenient(boolean value) { + lenient = value; + } + + /** + * Returns true if leniency about missing classes is enabled. + */ + public boolean isLenient() { + return lenient; + } + + /** + * classes that should be excluded from the scan, + * e.g. because we know what sheisty stuff those particular classes are up to. + */ + public void setExcludes(String[] classes) { + for (String s : classes) { + if (s.indexOf('*') != -1) { + throw new IllegalArgumentException("illegal third party audit exclusion: '" + s + "', wildcards are not permitted!") + } + } + excludes = classes; + } + + /** + * Returns current list of exclusions. 
+ */ + public String[] getExcludes() { + return excludes; + } + + @TaskAction + public void check() { + AntBuilder ant = new AntBuilder() + + // we are noisy for many reasons, working around performance problems with forbidden-apis, dealing + // with warnings about missing classes, etc. so we use our own "quiet" AntBuilder + ant.project.buildListeners.each { listener -> + if (listener instanceof BuildLogger) { + listener.messageOutputLevel = Project.MSG_ERR; + } + }; + + // we only want third party dependencies. + FileCollection jars = project.configurations.testCompile.fileCollection({ dependency -> + dependency.group != "org.elasticsearch" + }) + + // we don't want provided dependencies, which we have already scanned. e.g. don't + // scan ES core's dependencies for every single plugin + try { + jars -= project.configurations.getByName("provided") + } catch (UnknownConfigurationException ignored) {} + + // no dependencies matched, we are done + if (jars.isEmpty()) { + return; + } + + ant.taskdef(name: "thirdPartyAudit", + classname: "de.thetaphi.forbiddenapis.ant.AntTask", + classpath: project.configurations.forbiddenApis.asPath) + + // print which jars we are going to scan, always + // this is not the time to try to be succinct! Forbidden will print plenty on its own! + Set names = new HashSet<>() + for (File jar : jars) { + names.add(jar.getName()) + } + Logger logger = LoggerFactory.getLogger(getClass()); + logger.error("[thirdPartyAudit] Scanning: " + names) + + // warn that you won't see any forbidden apis warnings + if (lenient) { + logger.warn("[thirdPartyAudit] WARNING: leniency is enabled, will not fail if classes are missing!") + } + + // TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first, + // and then remove our temp dir afterwards. don't complain: try it yourself. + // we don't use gradle temp dir handling, just google it, or try it yourself. 
+ + File tmpDir = new File(project.buildDir, 'tmp/thirdPartyAudit') + + // clean up any previous mess (if we failed), then unzip everything to one directory + ant.delete(dir: tmpDir.getAbsolutePath()) + tmpDir.mkdirs() + for (File jar : jars) { + ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath()) + } + + // convert exclusion class names to binary file names + String[] excludedFiles = new String[excludes.length]; + for (int i = 0; i < excludes.length; i++) { + excludedFiles[i] = excludes[i].replace('.', '/') + ".class" + // check if the excluded file exists, if not, sure sign things are outdated + if (! new File(tmpDir, excludedFiles[i]).exists()) { + throw new IllegalStateException("bogus thirdPartyAudit exclusion: '" + excludes[i] + "', not found in any dependency") + } + } + + ant.thirdPartyAudit(internalRuntimeForbidden: true, + failOnUnsupportedJava: false, + failOnMissingClasses: !lenient, + classpath: project.configurations.testCompile.asPath) { + fileset(dir: tmpDir, excludes: excludedFiles.join(',')) + } + // clean up our mess (if we succeed) + ant.delete(dir: tmpDir.getAbsolutePath()) + } +} diff --git a/core/build.gradle b/core/build.gradle index fd8a0c10f5a..e00164af972 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -111,6 +111,14 @@ forbiddenPatterns { exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt' } +// classes are missing, e.g. org.jboss.marshalling.Marshaller +thirdPartyAudit.lenient = true +// uses internal sun ssl classes! 
+thirdPartyAudit.excludes = [ + // sun.security.x509 (X509CertInfo, X509CertImpl, X500Name) + 'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', +] + // dependency license are currently checked in distribution dependencyLicenses.enabled = false diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index 9f62e34687d..aac94570a36 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -33,6 +33,10 @@ dependencyLicenses { mapping from: /lucene-.*/, to: 'lucene' } +// do we or do we not depend on asm-tree, that is the question +// classes are missing, e.g. org.objectweb.asm.tree.LabelNode +thirdPartyAudit.lenient = true + compileJava.options.compilerArgs << '-Xlint:-rawtypes' compileTestJava.options.compilerArgs << '-Xlint:-rawtypes' diff --git a/modules/lang-groovy/build.gradle b/modules/lang-groovy/build.gradle index 341dcbf0d6c..2bacd2dd560 100644 --- a/modules/lang-groovy/build.gradle +++ b/modules/lang-groovy/build.gradle @@ -35,3 +35,12 @@ integTest { systemProperty 'es.script.indexed', 'on' } } + +// classes are missing, e.g. jline.console.completer.Completer +thirdPartyAudit.lenient = true +thirdPartyAudit.excludes = [ + // sun.misc.Unsafe + 'groovy.json.internal.FastStringUtils', + 'groovy.json.internal.FastStringUtils$StringImplementation$1', + 'groovy.json.internal.FastStringUtils$StringImplementation$2', +] diff --git a/plugins/discovery-azure/build.gradle b/plugins/discovery-azure/build.gradle index 5042824eb07..e823351dc36 100644 --- a/plugins/discovery-azure/build.gradle +++ b/plugins/discovery-azure/build.gradle @@ -66,3 +66,14 @@ compileJava.options.compilerArgs << '-Xlint:-deprecation' // TODO: and why does this static not show up in maven... compileTestJava.options.compilerArgs << '-Xlint:-static' +// classes are missing, e.g. 
org.osgi.framework.BundleActivator +thirdPartyAudit.lenient = true +// WE ARE JAR HELLING WITH THE JDK AND THAT IS WHY THIS HAPPENS +// TODO: fix this!!!!!!!!!!! +thirdPartyAudit.excludes = [ + // com.sun.xml.fastinfoset.stax.StAXDocumentParser + 'com.sun.xml.bind.v2.runtime.unmarshaller.FastInfosetConnector', + 'com.sun.xml.bind.v2.runtime.unmarshaller.FastInfosetConnector$CharSequenceImpl', + // com.sun.xml.fastinfoset.stax.StAXDocumentSerializer + 'com.sun.xml.bind.v2.runtime.output.FastInfosetStreamWriterOutput', +] diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 77cfd6626d5..04eafd07866 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -48,3 +48,12 @@ test { // this is needed for insecure plugins, remove if possible! systemProperty 'tests.artifact', project.name } + +// classes are missing, e.g. org.apache.avalon.framework.logger.Logger +thirdPartyAudit.lenient = true +thirdPartyAudit.excludes = [ + // com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl + // com.sun.org.apache.xml.internal.dtm.ref.DTMManagerDefault + // com.sun.org.apache.xpath.internal.XPathContext + 'com.amazonaws.util.XpathUtils', +] diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 4e6ade8788f..2ec479a3e80 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -31,3 +31,6 @@ test { // this is needed for insecure plugins, remove if possible! systemProperty 'tests.artifact', project.name } + +// classes are missing, e.g. 
org.apache.log.Logger +thirdPartyAudit.lenient = true diff --git a/plugins/lang-plan-a/build.gradle b/plugins/lang-plan-a/build.gradle index 618c094f683..c23c3a30efc 100644 --- a/plugins/lang-plan-a/build.gradle +++ b/plugins/lang-plan-a/build.gradle @@ -33,6 +33,9 @@ dependencies { compileJava.options.compilerArgs << '-Xlint:-cast,-fallthrough,-rawtypes' compileTestJava.options.compilerArgs << '-Xlint:-unchecked' +// classes are missing, e.g. org.objectweb.asm.tree.LabelNode +thirdPartyAudit.lenient = true + // regeneration logic, comes in via ant right now // don't port it to gradle, it works fine. diff --git a/plugins/lang-python/build.gradle b/plugins/lang-python/build.gradle index 269a3249386..0c5edc13522 100644 --- a/plugins/lang-python/build.gradle +++ b/plugins/lang-python/build.gradle @@ -36,3 +36,40 @@ integTest { } } +// classes are missing, e.g. org.tukaani.xz.FilterOptions +thirdPartyAudit.lenient = true +thirdPartyAudit.excludes = [ + // sun.security.x509 (X509CertInfo, X509CertImpl, X500Name) + 'org.python.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + + // sun.misc.Cleaner + 'org.python.netty.util.internal.Cleaner0', + + // sun.misc.Signal + 'jnr.posix.JavaPOSIX', + 'jnr.posix.JavaPOSIX$SunMiscSignalHandler', + + // sun.misc.Unsafe + 'com.kenai.jffi.MemoryIO$UnsafeImpl', + 'com.kenai.jffi.MemoryIO$UnsafeImpl32', + 'com.kenai.jffi.MemoryIO$UnsafeImpl64', + 'org.python.google.common.cache.Striped64', + 'org.python.google.common.cache.Striped64$1', + 'org.python.google.common.cache.Striped64$Cell', + 'org.python.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'org.python.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + 'org.python.netty.util.internal.chmv8.ForkJoinPool$2', + 'org.python.netty.util.internal.PlatformDependent0', + 'org.python.netty.util.internal.UnsafeAtomicIntegerFieldUpdater', + 
'org.python.netty.util.internal.UnsafeAtomicLongFieldUpdater', + 'org.python.netty.util.internal.UnsafeAtomicReferenceFieldUpdater', + 'org.python.netty.util.internal.chmv8.ConcurrentHashMapV8', + 'org.python.netty.util.internal.chmv8.ConcurrentHashMapV8$1', + 'org.python.netty.util.internal.chmv8.ConcurrentHashMapV8$TreeBin', + 'org.python.netty.util.internal.chmv8.CountedCompleter', + 'org.python.netty.util.internal.chmv8.CountedCompleter$1', + 'org.python.netty.util.internal.chmv8.ForkJoinPool', + 'org.python.netty.util.internal.chmv8.ForkJoinPool$WorkQueue', + 'org.python.netty.util.internal.chmv8.ForkJoinTask', + 'org.python.netty.util.internal.chmv8.ForkJoinTask$1', +] diff --git a/plugins/mapper-attachments/build.gradle b/plugins/mapper-attachments/build.gradle index e14cf543043..867b316fbf5 100644 --- a/plugins/mapper-attachments/build.gradle +++ b/plugins/mapper-attachments/build.gradle @@ -69,3 +69,10 @@ forbiddenPatterns { exclude '**/*.pdf' exclude '**/*.epub' } + +// classes are missing, e.g. org.openxmlformats.schemas.drawingml.x2006.chart.CTExtensionList +thirdPartyAudit.lenient = true +thirdPartyAudit.excludes = [ + // com.sun.syndication (SyndFeedInput, SyndFeed, SyndEntry, SyndContent) + 'org.apache.tika.parser.feed.FeedParser', +] diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 8f18f67f70d..c09a3ff4d67 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -200,4 +200,7 @@ integTest { cluster { plugin(pluginProperties.extension.name, zipTree(distZipHadoop2.archivePath)) } -} \ No newline at end of file +} + +// classes are missing, e.g. 
org.mockito.Mockito +thirdPartyAudit.lenient = true diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 32ad37530c2..bd38e92f4c8 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -49,3 +49,12 @@ test { // this is needed for insecure plugins, remove if possible! systemProperty 'tests.artifact', project.name } + +// classes are missing, e.g. org.apache.log.Logger +thirdPartyAudit.lenient = true +thirdPartyAudit.excludes = [ + // com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl + // com.sun.org.apache.xml.internal.dtm.ref.DTMManagerDefault + // com.sun.org.apache.xpath.internal.XPathContext + 'com.amazonaws.util.XpathUtils', +] diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index 96aa6fb635d..bb5a521e53c 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -34,3 +34,14 @@ dependencies { test { systemProperty 'tests.security.manager', 'false' } + +// classes are missing, com.ibm.icu.lang.UCharacter +thirdPartyAudit.lenient = true +thirdPartyAudit.excludes = [ + // sun.misc.Unsafe + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', +] diff --git a/test-framework/build.gradle b/test-framework/build.gradle index a423f56c922..2263413bbde 100644 --- a/test-framework/build.gradle +++ b/test-framework/build.gradle @@ -47,3 +47,5 @@ forbiddenApisMain { // TODO: should we have licenses for our test deps? dependencyLicenses.enabled = false +// we intentionally exclude the ant tasks because people were depending on them from their tests!!!!!!! 
+thirdPartyAudit.lenient = true From 49f37a526c1adcea498a3274026c9a929e9fc08a Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Wed, 16 Dec 2015 16:50:52 -0500 Subject: [PATCH 102/167] fix bad indent --- .../elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index b5ce35816a1..010b19675d1 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -91,7 +91,7 @@ public class ThirdPartyAuditTask extends DefaultTask { if (listener instanceof BuildLogger) { listener.messageOutputLevel = Project.MSG_ERR; } - }; + }; // we only want third party dependencies. FileCollection jars = project.configurations.testCompile.fileCollection({ dependency -> From 9aa41d092ed5b7ebe4bfd9bfe21450b8f92ee010 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 16 Dec 2015 13:57:08 -0500 Subject: [PATCH 103/167] Info on compressed ordinary object pointers This commit adds to JvmInfo the status of whether or not compressed ordinary object pointers are enabled. Additionally, logging of the max heap size and the status of the compressed ordinary object pointers flag are provided on startup. 
Relates #13187, relates elastic/elasticsearch-definitive-guide#455 --- .../elasticsearch/env/NodeEnvironment.java | 38 +++++++++++++--- .../elasticsearch/monitor/jvm/JvmInfo.java | 44 ++++++++++++++++++- 2 files changed, 76 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 3a1b430f98b..86b6b704a72 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -21,7 +21,12 @@ package org.elasticsearch.env; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.store.*; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.store.Lock; +import org.apache.lucene.store.LockObtainFailedException; +import org.apache.lucene.store.NativeFSLockFactory; +import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -31,6 +36,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -38,11 +44,25 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; +import org.elasticsearch.monitor.jvm.JvmInfo; import java.io.Closeable; import java.io.IOException; -import java.nio.file.*; -import java.util.*; +import 
java.nio.file.AtomicMoveNotSupportedException; +import java.nio.file.DirectoryStream; +import java.nio.file.FileStore; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -145,7 +165,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) { Path dir = environment.dataWithClusterFiles()[dirIndex].resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId)); Files.createDirectories(dir); - + try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) { logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath()); try { @@ -187,6 +207,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { } maybeLogPathDetails(); + maybeLogHeapDetails(); if (settings.getAsBoolean(SETTING_ENABLE_LUCENE_SEGMENT_INFOS_TRACE, false)) { SegmentInfos.setInfoStream(System.out); @@ -274,6 +295,13 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { } } + private void maybeLogHeapDetails() { + ByteSizeValue maxHeapSize = JvmInfo.jvmInfo().getMem().getHeapMax(); + Boolean usingCompressedOops = JvmInfo.jvmInfo().usingCompressedOops(); + String usingCompressedOopsStatus = usingCompressedOops == null ? 
"unknown" : Boolean.toString(usingCompressedOops); + logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, usingCompressedOopsStatus); + } + private static String toString(Collection items) { StringBuilder b = new StringBuilder(); for(String item : items) { @@ -811,7 +839,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { // Sanity check: assert Integer.parseInt(shardPath.getName(count-1).toString()) >= 0; assert "indices".equals(shardPath.getName(count-3).toString()); - + return shardPath.getParent().getParent().getParent(); } } diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index e224c722d42..7fbfb5bc82b 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -29,7 +29,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; -import java.lang.management.*; +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; +import java.lang.management.ManagementPermission; +import java.lang.management.MemoryMXBean; +import java.lang.management.MemoryPoolMXBean; +import java.lang.management.PlatformManagedObject; +import java.lang.management.RuntimeMXBean; +import java.lang.reflect.Method; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -101,6 +108,21 @@ public class JvmInfo implements Streamable, ToXContent { info.memoryPools[i] = memoryPoolMXBean.getName(); } + try { + @SuppressWarnings("unchecked") Class clazz = + (Class)Class.forName("com.sun.management.HotSpotDiagnosticMXBean"); + Class vmOptionClazz = Class.forName("com.sun.management.VMOption"); + PlatformManagedObject hotSpotDiagnosticMXBean = ManagementFactory.getPlatformMXBean(clazz); + Method 
vmOptionMethod = clazz.getMethod("getVMOption", String.class); + Object useCompressedOopsVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops"); + Method valueMethod = vmOptionClazz.getMethod("getValue"); + String value = (String)valueMethod.invoke(useCompressedOopsVmOption); + info.usingCompressedOops = Boolean.parseBoolean(value); + } catch (Throwable t) { + // unable to deduce the state of compressed oops + // usingCompressedOops will hold its default value of null + } + INSTANCE = info; } @@ -135,6 +157,8 @@ public class JvmInfo implements Streamable, ToXContent { String[] gcCollectors = Strings.EMPTY_ARRAY; String[] memoryPools = Strings.EMPTY_ARRAY; + private Boolean usingCompressedOops; + private JvmInfo() { } @@ -258,6 +282,10 @@ public class JvmInfo implements Streamable, ToXContent { return this.systemProperties; } + public Boolean usingCompressedOops() { + return this.usingCompressedOops; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.JVM); @@ -279,6 +307,8 @@ public class JvmInfo implements Streamable, ToXContent { builder.field(Fields.GC_COLLECTORS, gcCollectors); builder.field(Fields.MEMORY_POOLS, memoryPools); + builder.field(Fields.USING_COMPRESSED_OOPS, usingCompressedOops == null ? 
"unknown" : usingCompressedOops); + builder.endObject(); return builder; } @@ -306,6 +336,7 @@ public class JvmInfo implements Streamable, ToXContent { static final XContentBuilderString DIRECT_MAX_IN_BYTES = new XContentBuilderString("direct_max_in_bytes"); static final XContentBuilderString GC_COLLECTORS = new XContentBuilderString("gc_collectors"); static final XContentBuilderString MEMORY_POOLS = new XContentBuilderString("memory_pools"); + static final XContentBuilderString USING_COMPRESSED_OOPS = new XContentBuilderString("using_compressed_ordinary_object_pointers"); } public static JvmInfo readJvmInfo(StreamInput in) throws IOException { @@ -337,6 +368,11 @@ public class JvmInfo implements Streamable, ToXContent { mem.readFrom(in); gcCollectors = in.readStringArray(); memoryPools = in.readStringArray(); + if (in.readBoolean()) { + usingCompressedOops = in.readBoolean(); + } else { + usingCompressedOops = null; + } } @Override @@ -361,6 +397,12 @@ public class JvmInfo implements Streamable, ToXContent { mem.writeTo(out); out.writeStringArray(gcCollectors); out.writeStringArray(memoryPools); + if (usingCompressedOops != null) { + out.writeBoolean(true); + out.writeBoolean(usingCompressedOops); + } else { + out.writeBoolean(false); + } } public static class Mem implements Streamable { From 12e241ff1ac6c9b9418c69b821c644703481cf77 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 16 Dec 2015 17:29:08 -0500 Subject: [PATCH 104/167] Ensure compressed oops flag is written as a string --- core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index 7fbfb5bc82b..8e1fb9ba7db 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -307,7 +307,7 @@ public class JvmInfo implements 
Streamable, ToXContent { builder.field(Fields.GC_COLLECTORS, gcCollectors); builder.field(Fields.MEMORY_POOLS, memoryPools); - builder.field(Fields.USING_COMPRESSED_OOPS, usingCompressedOops == null ? "unknown" : usingCompressedOops); + builder.field(Fields.USING_COMPRESSED_OOPS, usingCompressedOops == null ? "unknown" : Boolean.toString(usingCompressedOops)); builder.endObject(); return builder; From 207f0a4eda73fd706a7e8e8dc938b8bedec0740e Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Wed, 16 Dec 2015 17:53:55 -0500 Subject: [PATCH 105/167] use async translog for this test --- .../java/org/elasticsearch/indices/stats/IndexStatsIT.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index a87da6fc046..4bdd972ea9c 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -23,8 +23,8 @@ import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.CommonStats; -import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -46,11 +46,12 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.MergePolicyConfig; import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.store.IndexStore; +import 
org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; import java.util.EnumSet; @@ -315,7 +316,7 @@ public class IndexStatsIT extends ESIntegTestCase { .put(MergeSchedulerConfig.MAX_THREAD_COUNT, "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT, "1") .put("index.merge.policy.type", "tiered") - + .put(TranslogConfig.INDEX_TRANSLOG_DURABILITY, "ASYNC") )); ensureGreen(); long termUpto = 0; From 7e14245c698b6206824fe320c8b64c384493d852 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 16 Dec 2015 18:43:31 -0500 Subject: [PATCH 106/167] Add callback for publication of new cluster state This commit adds a callback for a cluster state task executor that will be invoked if the execution of a batch of cluster state update tasks led to a new cluster state and that new cluster state was successfully published. Closes #15482 --- .../cluster/ClusterStateTaskExecutor.java | 7 +++++ .../service/InternalClusterService.java | 2 ++ .../cluster/ClusterServiceIT.java | 30 +++++++++++++++++-- 3 files changed, 36 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index ab85d9540f0..fb22c2ca368 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -37,6 +37,13 @@ public interface ClusterStateTaskExecutor { return true; } + /** + * Callback invoked after new cluster state is published. Note that + * this method is not invoked if the cluster state was not updated. 
+ */ + default void clusterStatePublished(ClusterState newClusterState) { + } + /** * Represents the result of a batched execution of cluster state update tasks * @param the type of the cluster state update task diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index b8c898a31e9..4888fc9d48e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -605,6 +605,8 @@ public class InternalClusterService extends AbstractLifecycleComponent { private AtomicInteger counter = new AtomicInteger(); + private AtomicInteger batches = new AtomicInteger(); + private AtomicInteger published = new AtomicInteger(); @Override public BatchResult execute(ClusterState currentState, List tasks) throws Exception { tasks.forEach(task -> task.execute()); counter.addAndGet(tasks.size()); - return BatchResult.builder().successes(tasks).build(currentState); + ClusterState maybeUpdatedClusterState = currentState; + if (randomBoolean()) { + maybeUpdatedClusterState = ClusterState.builder(currentState).build(); + batches.incrementAndGet(); + } + return BatchResult.builder().successes(tasks).build(maybeUpdatedClusterState); } @Override public boolean runOnlyOnMaster() { return false; } + + @Override + public void clusterStatePublished(ClusterState newClusterState) { + published.incrementAndGet(); + } } int numberOfThreads = randomIntBetween(2, 8); int tasksSubmittedPerThread = randomIntBetween(1, 1024); @@ -838,6 +861,7 @@ public class ClusterServiceIT extends ESIntegTestCase { for (TaskExecutor executor : executors) { if (counts.containsKey(executor)) { assertEquals((int) counts.get(executor), executor.counter.get()); + assertEquals(executor.batches.get(), executor.published.get()); } } From 42138007db45fee85558a19ca8702e944ec706fd Mon Sep 17 
00:00:00 2001 From: Robert Muir Date: Wed, 16 Dec 2015 18:56:02 -0500 Subject: [PATCH 107/167] add some more comments about internal api usage --- .../gradle/precommit/ThirdPartyAuditTask.groovy | 4 ---- core/build.gradle | 2 +- modules/lang-groovy/build.gradle | 2 +- plugins/discovery-azure/build.gradle | 4 ++-- plugins/discovery-ec2/build.gradle | 6 +++--- plugins/lang-python/build.gradle | 8 ++++---- plugins/mapper-attachments/build.gradle | 2 +- plugins/repository-s3/build.gradle | 6 +++--- qa/evil-tests/build.gradle | 2 +- 9 files changed, 16 insertions(+), 20 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index 010b19675d1..86ac767d06c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -26,9 +26,6 @@ import org.gradle.api.tasks.TaskAction import org.apache.tools.ant.BuildLogger import org.apache.tools.ant.Project -import org.slf4j.Logger -import org.slf4j.LoggerFactory - /** * Basic static checking to keep tabs on third party JARs */ @@ -119,7 +116,6 @@ public class ThirdPartyAuditTask extends DefaultTask { for (File jar : jars) { names.add(jar.getName()) } - Logger logger = LoggerFactory.getLogger(getClass()); logger.error("[thirdPartyAudit] Scanning: " + names) // warn that you won't see any forbidden apis warnings diff --git a/core/build.gradle b/core/build.gradle index e00164af972..302757ccc8b 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -115,7 +115,7 @@ forbiddenPatterns { thirdPartyAudit.lenient = true // uses internal sun ssl classes! 
thirdPartyAudit.excludes = [ - // sun.security.x509 (X509CertInfo, X509CertImpl, X500Name) + // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name) 'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', ] diff --git a/modules/lang-groovy/build.gradle b/modules/lang-groovy/build.gradle index 2bacd2dd560..76686a760a2 100644 --- a/modules/lang-groovy/build.gradle +++ b/modules/lang-groovy/build.gradle @@ -39,7 +39,7 @@ integTest { // classes are missing, e.g. jline.console.completer.Completer thirdPartyAudit.lenient = true thirdPartyAudit.excludes = [ - // sun.misc.Unsafe + // uses internal java api: sun.misc.Unsafe 'groovy.json.internal.FastStringUtils', 'groovy.json.internal.FastStringUtils$StringImplementation$1', 'groovy.json.internal.FastStringUtils$StringImplementation$2', diff --git a/plugins/discovery-azure/build.gradle b/plugins/discovery-azure/build.gradle index e823351dc36..6f21364ea6d 100644 --- a/plugins/discovery-azure/build.gradle +++ b/plugins/discovery-azure/build.gradle @@ -71,9 +71,9 @@ thirdPartyAudit.lenient = true // WE ARE JAR HELLING WITH THE JDK AND THAT IS WHY THIS HAPPENS // TODO: fix this!!!!!!!!!!! thirdPartyAudit.excludes = [ - // com.sun.xml.fastinfoset.stax.StAXDocumentParser + // uses internal java api: com.sun.xml.fastinfoset.stax.StAXDocumentParser 'com.sun.xml.bind.v2.runtime.unmarshaller.FastInfosetConnector', 'com.sun.xml.bind.v2.runtime.unmarshaller.FastInfosetConnector$CharSequenceImpl', - // com.sun.xml.fastinfoset.stax.StAXDocumentSerializer + // uses internal java api: com.sun.xml.fastinfoset.stax.StAXDocumentSerializer 'com.sun.xml.bind.v2.runtime.output.FastInfosetStreamWriterOutput', ] diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 04eafd07866..14767c6783b 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -52,8 +52,8 @@ test { // classes are missing, e.g. 
org.apache.avalon.framework.logger.Logger thirdPartyAudit.lenient = true thirdPartyAudit.excludes = [ - // com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl - // com.sun.org.apache.xml.internal.dtm.ref.DTMManagerDefault - // com.sun.org.apache.xpath.internal.XPathContext + // uses internal java api: com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl + // uses internal java api: com.sun.org.apache.xml.internal.dtm.ref.DTMManagerDefault + // uses internal java api: com.sun.org.apache.xpath.internal.XPathContext 'com.amazonaws.util.XpathUtils', ] diff --git a/plugins/lang-python/build.gradle b/plugins/lang-python/build.gradle index 0c5edc13522..a7faedf6a78 100644 --- a/plugins/lang-python/build.gradle +++ b/plugins/lang-python/build.gradle @@ -39,17 +39,17 @@ integTest { // classes are missing, e.g. org.tukaani.xz.FilterOptions thirdPartyAudit.lenient = true thirdPartyAudit.excludes = [ - // sun.security.x509 (X509CertInfo, X509CertImpl, X500Name) + // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name) 'org.python.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', - // sun.misc.Cleaner + // uses internal java api: sun.misc.Cleaner 'org.python.netty.util.internal.Cleaner0', - // sun.misc.Signal + // uses internal java api: sun.misc.Signal 'jnr.posix.JavaPOSIX', 'jnr.posix.JavaPOSIX$SunMiscSignalHandler', - // sun.misc.Unsafe + // uses internal java api: sun.misc.Unsafe 'com.kenai.jffi.MemoryIO$UnsafeImpl', 'com.kenai.jffi.MemoryIO$UnsafeImpl32', 'com.kenai.jffi.MemoryIO$UnsafeImpl64', diff --git a/plugins/mapper-attachments/build.gradle b/plugins/mapper-attachments/build.gradle index 867b316fbf5..f3c414a0718 100644 --- a/plugins/mapper-attachments/build.gradle +++ b/plugins/mapper-attachments/build.gradle @@ -73,6 +73,6 @@ forbiddenPatterns { // classes are missing, e.g. 
org.openxmlformats.schemas.drawingml.x2006.chart.CTExtensionList thirdPartyAudit.lenient = true thirdPartyAudit.excludes = [ - // com.sun.syndication (SyndFeedInput, SyndFeed, SyndEntry, SyndContent) + // uses internal java api: com.sun.syndication (SyndFeedInput, SyndFeed, SyndEntry, SyndContent) 'org.apache.tika.parser.feed.FeedParser', ] diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index bd38e92f4c8..82797788f8e 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -53,8 +53,8 @@ test { // classes are missing, e.g. org.apache.log.Logger thirdPartyAudit.lenient = true thirdPartyAudit.excludes = [ - // com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl - // com.sun.org.apache.xml.internal.dtm.ref.DTMManagerDefault - // com.sun.org.apache.xpath.internal.XPathContext + // uses internal java api: com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl + // uses internal java api: com.sun.org.apache.xml.internal.dtm.ref.DTMManagerDefault + // uses internal java api: com.sun.org.apache.xpath.internal.XPathContext 'com.amazonaws.util.XpathUtils', ] diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index bb5a521e53c..02ed75fccbb 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -38,7 +38,7 @@ test { // classes are missing, com.ibm.icu.lang.UCharacter thirdPartyAudit.lenient = true thirdPartyAudit.excludes = [ - // sun.misc.Unsafe + // uses internal java api: sun.misc.Unsafe 'com.google.common.cache.Striped64', 'com.google.common.cache.Striped64$1', 'com.google.common.cache.Striped64$Cell', From 9a1133ca504cbf93114360230d40ccaa486a5dee Mon Sep 17 00:00:00 2001 From: Jason Bryan Date: Wed, 16 Dec 2015 17:43:50 -0500 Subject: [PATCH 108/167] Fix typo in scroll.asciidoc Fix scroll request with sort. 
Closes #15493 --- docs/reference/search/request/scroll.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index 825564d799d..e18593d21cc 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -98,7 +98,7 @@ curl -XGET 'localhost:9200/_search?scroll=1m' -d ' { "sort": [ "_doc" - } + ] } ' -------------------------------------------------- From 17a806631d0b4afca80c457d04c0d7282cc94bef Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 16 Dec 2015 17:59:43 -0800 Subject: [PATCH 109/167] Test: Allow integ tests to exclude mock plugins We currently randomly add a set of mock plugins to integ tests. Sometimes it is necessary to omit this mock plugins, but other times you may just want to suppress a particular mock plugin. For example, if you have your own transport, you want to omit the asserting local transport mock, since they would both try to set the transport.type. 
--- .../java/org/elasticsearch/tribe/TribeIT.java | 9 +++++++- .../test/ESBackcompatTestCase.java | 8 +++++++ .../elasticsearch/test/ESIntegTestCase.java | 22 +++++++++++++++---- .../test/InternalTestCluster.java | 20 ++++++----------- .../test/NodeConfigurationSource.java | 20 +++++++++++++++++ .../ClusterDiscoveryConfiguration.java | 3 +++ .../test/test/InternalTestClusterTests.java | 8 +++---- 7 files changed, 68 insertions(+), 22 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index 28a3dea118e..1350dcbb8ed 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; @@ -47,6 +48,7 @@ import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Map; @@ -79,6 +81,11 @@ public class TribeIT extends ESIntegTestCase { return Settings.builder().put(Node.HTTP_ENABLED, false).build(); } + @Override + public Collection> nodePlugins() { + return Collections.emptyList(); + } + @Override public Settings transportClientSettings() { return null; @@ -86,7 +93,7 @@ public class TribeIT extends ESIntegTestCase { }; cluster2 = new InternalTestCluster(InternalTestCluster.configuredNodeMode(), randomLong(), createTempDir(), 2, 2, - Strings.randomBase64UUID(getRandom()), nodeConfigurationSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, true); + Strings.randomBase64UUID(getRandom()), 
nodeConfigurationSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, Collections.emptyList()); cluster2.beforeTest(getRandom(), 0.1); cluster2.ensureAtLeastNumDataNodes(2); diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java index ade424599fe..49644196da4 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.listeners.LoggingListener; @@ -40,6 +41,8 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; import java.util.Map; import static org.hamcrest.Matchers.is; @@ -175,6 +178,11 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase { return externalNodeSettings(nodeOrdinal); } + @Override + public Collection> nodePlugins() { + return Collections.emptyList(); + } + @Override public Settings transportClientSettings() { return transportClientSettings(); diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index ba1c16d61ab..e4fb5e755ca 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -95,6 +95,7 @@ import org.elasticsearch.discovery.zen.ZenDiscovery; import 
org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -111,13 +112,18 @@ import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.node.Node; +import org.elasticsearch.node.NodeMocksPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchService; import org.elasticsearch.test.client.RandomizingClient; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.rest.client.http.HttpRequestBuilder; +import org.elasticsearch.test.store.MockFSIndexStore; +import org.elasticsearch.test.transport.AssertingLocalTransport; +import org.elasticsearch.test.transport.MockTransportService; import org.hamcrest.Matchers; import org.joda.time.DateTimeZone; import org.junit.After; @@ -128,6 +134,7 @@ import org.junit.BeforeClass; import java.io.IOException; import java.io.InputStream; import java.lang.annotation.Annotation; +import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Inherited; import java.lang.annotation.Retention; @@ -1806,14 +1813,21 @@ public abstract class ESIntegTestCase extends ESTestCase { nodeMode = "local"; } - boolean enableMockModules = enableMockModules(); + Collection> mockPlugins = getMockPlugins(); + return new InternalTestCluster(nodeMode, seed, createTempDir(), minNumDataNodes, maxNumDataNodes, InternalTestCluster.clusterName(scope.name(), seed) + 
"-cluster", nodeConfigurationSource, getNumClientNodes(), - InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, enableMockModules); + InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins); } - protected boolean enableMockModules() { - return RandomizedTest.systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true); + /** Return the mock plugins the cluster should use. These may be randomly omitted based on the cluster seed. */ + protected Collection> getMockPlugins() { + return pluginList(MockTransportService.TestPlugin.class, + MockFSIndexStore.TestPlugin.class, + NodeMocksPlugin.class, + MockEngineFactoryPlugin.class, + MockSearchService.TestPlugin.class, + AssertingLocalTransport.TestPlugin.class); } /** diff --git a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 7ae3226b66a..bc6db112d76 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -199,7 +199,7 @@ public final class InternalTestCluster extends TestCluster { private final ExecutorService executor; - private final boolean enableMockModules; + private final Collection> mockPlugins; /** * All nodes started by the cluster will have their name set to nodePrefix followed by a positive number @@ -212,7 +212,7 @@ public final class InternalTestCluster extends TestCluster { public InternalTestCluster(String nodeMode, long clusterSeed, Path baseDir, int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes, - boolean enableHttpPipelining, String nodePrefix, boolean enableMockModules) { + boolean enableHttpPipelining, String nodePrefix, Collection> mockPlugins) { super(clusterSeed); if ("network".equals(nodeMode) == false && "local".equals(nodeMode) == false) { throw new 
IllegalArgumentException("Unknown nodeMode: " + nodeMode); @@ -248,7 +248,7 @@ public final class InternalTestCluster extends TestCluster { this.nodePrefix = nodePrefix; assert nodePrefix != null; - this.enableMockModules = enableMockModules; + this.mockPlugins = mockPlugins; /* * TODO @@ -359,16 +359,10 @@ public final class InternalTestCluster extends TestCluster { private Collection> getPlugins(long seed) { Set> plugins = new HashSet<>(nodeConfigurationSource.nodePlugins()); - Random random = new Random(seed); - if (enableMockModules && usually(random)) { - plugins.add(MockTransportService.TestPlugin.class); - plugins.add(MockFSIndexStore.TestPlugin.class); - plugins.add(NodeMocksPlugin.class); - plugins.add(MockEngineFactoryPlugin.class); - plugins.add(MockSearchService.TestPlugin.class); - if (isLocalTransportConfigured()) { - plugins.add(AssertingLocalTransport.TestPlugin.class); - } + plugins.addAll(mockPlugins); + if (isLocalTransportConfigured() == false) { + // this is crazy we must do this here...we should really just always be using local transport... 
+ plugins.remove(AssertingLocalTransport.TestPlugin.class); } return plugins; } diff --git a/test-framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java b/test-framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java index e04e840e525..5dfb845c192 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java +++ b/test-framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java @@ -19,10 +19,18 @@ package org.elasticsearch.test; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.MockEngineFactoryPlugin; +import org.elasticsearch.node.NodeMocksPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.MockSearchService; +import org.elasticsearch.test.store.MockFSIndexStore; +import org.elasticsearch.test.transport.AssertingLocalTransport; +import org.elasticsearch.test.transport.MockTransportService; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.List; public abstract class NodeConfigurationSource { @@ -43,6 +51,18 @@ public abstract class NodeConfigurationSource { */ public abstract Settings nodeSettings(int nodeOrdinal); + /** Plugins that will be randomly added to the node */ + public Collection> mockPlugins() { + List> plugins = new ArrayList<>(); + plugins.add(MockTransportService.TestPlugin.class); + plugins.add(MockFSIndexStore.TestPlugin.class); + plugins.add(NodeMocksPlugin.class); + plugins.add(MockEngineFactoryPlugin.class); + plugins.add(MockSearchService.TestPlugin.class); + plugins.add(AssertingLocalTransport.TestPlugin.class); + return plugins; + } + /** Returns plugins that should be loaded on the node */ public Collection> nodePlugins() { return Collections.emptyList(); diff --git a/test-framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java 
b/test-framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java index e549c185616..484f65ea650 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java +++ b/test-framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java @@ -24,12 +24,15 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; import java.io.IOException; import java.net.InetSocketAddress; import java.net.ServerSocket; +import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.Set; diff --git a/test-framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test-framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index 1514e254a7f..af468fa084c 100644 --- a/test-framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test-framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -52,8 +52,8 @@ public class InternalTestClusterTests extends ESTestCase { String nodePrefix = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); Path baseDir = createTempDir(); - InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true); - InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true); + InternalTestCluster cluster0 = new 
InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, Collections.emptyList()); + InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, Collections.emptyList()); // TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way assertClusters(cluster0, cluster1, false); @@ -110,8 +110,8 @@ public class InternalTestClusterTests extends ESTestCase { String nodePrefix = "foobar"; Path baseDir = createTempDir(); - InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true); - InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true); + InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, Collections.emptyList()); + InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, Collections.emptyList()); assertClusters(cluster0, cluster1, false); long seed = randomLong(); From 4523eaec889960ebaf183f3c4a943d47fc59d069 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Wed, 16 Dec 2015 12:36:13 -0800 Subject: [PATCH 110/167] Added plumbing for compile time script parameters. 
Closes #15464 --- .../TransportRenderSearchTemplateAction.java | 4 +- .../action/update/UpdateHelper.java | 3 +- .../index/query/QueryShardContext.java | 3 +- .../index/query/ScriptQueryBuilder.java | 5 +- .../script/ScriptScoreFunctionBuilder.java | 5 +- .../script/NativeScriptEngineService.java | 2 +- .../script/ScriptEngineService.java | 2 +- .../elasticsearch/script/ScriptService.java | 78 +++++++++++++------ .../elasticsearch/search/SearchService.java | 5 +- .../heuristics/ScriptHeuristic.java | 5 +- .../scripted/InternalScriptedMetric.java | 3 +- .../scripted/ScriptedMetricAggregator.java | 7 +- .../BucketScriptPipelineAggregator.java | 2 +- .../BucketSelectorPipelineAggregator.java | 3 +- .../support/ValuesSourceParser.java | 3 +- .../script/ScriptFieldsParseElement.java | 5 +- .../search/sort/ScriptSortParser.java | 3 +- .../suggest/phrase/PhraseSuggestParser.java | 3 +- .../elasticsearch/script/FileScriptTests.java | 4 +- .../script/NativeScriptTests.java | 7 +- .../script/ScriptContextTests.java | 14 ++-- .../script/ScriptModesTests.java | 2 +- .../script/ScriptServiceTests.java | 44 +++++------ .../aggregations/bucket/DateHistogramIT.java | 4 +- .../search/aggregations/metrics/AvgIT.java | 6 +- .../search/aggregations/metrics/SumIT.java | 6 +- .../aggregations/metrics/ValueCountIT.java | 4 +- .../org/elasticsearch/update/UpdateIT.java | 8 +- .../ExpressionScriptEngineService.java | 2 +- .../script/expression/ExpressionTests.java | 10 +-- .../groovy/GroovyScriptEngineService.java | 2 +- .../script/groovy/GroovySecurityTests.java | 6 +- .../mustache/MustacheScriptEngineService.java | 2 +- .../mustache/MustacheScriptEngineTests.java | 5 +- .../JavaScriptScriptEngineService.java | 6 +- .../JavaScriptScriptEngineTests.java | 25 +++--- .../JavaScriptScriptMultiThreadedTests.java | 7 +- .../javascript/JavaScriptSecurityTests.java | 13 ++-- .../script/javascript/SimpleBench.java | 3 +- .../plan/a/PlanAScriptEngineService.java | 29 +++++-- 
.../plan/a/FloatOverflowDisabledTests.java | 11 ++- .../plan/a/FloatOverflowEnabledTests.java | 11 ++- .../plan/a/IntegerOverflowDisabledTests.java | 11 ++- .../plan/a/IntegerOverflowEnabledTests.java | 11 ++- .../plan/a/ScriptEngineTests.java | 5 +- .../elasticsearch/plan/a/ScriptTestCase.java | 17 ++-- .../plan/a/WhenThingsGoWrongTests.java | 11 +++ .../python/PythonScriptEngineService.java | 6 +- .../python/PythonScriptEngineTests.java | 19 ++--- .../PythonScriptMultiThreadedTests.java | 5 +- .../script/python/PythonSecurityTests.java | 15 ++-- .../script/python/SimpleBench.java | 3 +- .../script/MockScriptEngine.java | 2 +- 53 files changed, 280 insertions(+), 197 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java index 5fe8297a6ba..f2bfb18c43f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java @@ -33,6 +33,8 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Collections; + public class TransportRenderSearchTemplateAction extends HandledTransportAction { private final ScriptService scriptService; @@ -55,7 +57,7 @@ public class TransportRenderSearchTemplateAction extends HandledTransportAction< @Override protected void doRun() throws Exception { - ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request); + ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request, Collections.emptyMap()); BytesReference processedTemplate = 
(BytesReference) executable.run(); RenderSearchTemplateResponse response = new RenderSearchTemplateResponse(); response.source(processedTemplate); diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 9f8b2a2e7be..d28ba2986e2 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -50,6 +50,7 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.lookup.SourceLookup; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -245,7 +246,7 @@ public class UpdateHelper extends AbstractComponent { private Map executeScript(UpdateRequest request, Map ctx) { try { if (scriptService != null) { - ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request); + ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request, Collections.emptyMap()); script.setNextVar("ctx", ctx); script.run(); // we need to unwrap the ctx... diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index 65dfb559e3f..faf482ead94 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -63,6 +63,7 @@ import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -364,7 +365,7 @@ public class QueryShardContext { * Executes the given template, and returns the response. 
*/ public BytesReference executeQueryTemplate(Template template, SearchContext searchContext) { - ExecutableScript executable = getScriptService().executable(template, ScriptContext.Standard.SEARCH, searchContext); + ExecutableScript executable = getScriptService().executable(template, ScriptContext.Standard.SEARCH, searchContext, Collections.emptyMap()); return (BytesReference) executable.run(); } diff --git a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index f69ac8c0548..6f14f15d3f2 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -33,6 +33,7 @@ import org.elasticsearch.script.Script.ScriptField; import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; +import java.util.Collections; import java.util.Objects; public class ScriptQueryBuilder extends AbstractQueryBuilder { @@ -80,7 +81,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder public ScriptQuery(Script script, ScriptService scriptService, SearchLookup searchLookup) { this.script = script; - this.searchScript = scriptService.search(searchLookup, script, ScriptContext.Standard.SEARCH); + this.searchScript = scriptService.search(searchLookup, script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); } @Override @@ -161,4 +162,4 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder protected boolean doEquals(ScriptQueryBuilder other) { return Objects.equals(script, other.script); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java index 92308466312..5fcd70b65dc 100644 --- 
a/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java @@ -33,6 +33,7 @@ import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SearchScript; import java.io.IOException; +import java.util.Collections; import java.util.Objects; /** @@ -89,10 +90,10 @@ public class ScriptScoreFunctionBuilder extends ScoreFunctionBuilder params) { NativeScriptFactory scriptFactory = scripts.get(script); if (scriptFactory != null) { return scriptFactory; diff --git a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java index 993c95ad797..41befc9406f 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java @@ -36,7 +36,7 @@ public interface ScriptEngineService extends Closeable { boolean sandboxed(); - Object compile(String script); + Object compile(String script, Map params); ExecutableScript executable(CompiledScript compiledScript, @Nullable Map vars); diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 3b91f2d3110..c9e9f9a873d 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -67,6 +67,7 @@ import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collections; import java.util.HashMap; import java.util.Locale; import java.util.Map; @@ -96,9 +97,9 @@ public class ScriptService extends AbstractComponent implements Closeable { private final Map scriptEnginesByLang; private final Map scriptEnginesByExt; - private final ConcurrentMap staticCache = 
ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap staticCache = ConcurrentCollections.newConcurrentMap(); - private final Cache cache; + private final Cache cache; private final Path scriptsDirectory; private final ScriptModes scriptModes; @@ -153,7 +154,7 @@ public class ScriptService extends AbstractComponent implements Closeable { this.defaultLang = settings.get(DEFAULT_SCRIPTING_LANGUAGE_SETTING, DEFAULT_LANG); - CacheBuilder cacheBuilder = CacheBuilder.builder(); + CacheBuilder cacheBuilder = CacheBuilder.builder(); if (cacheMaxSize >= 0) { cacheBuilder.setMaximumWeight(cacheMaxSize); } @@ -224,7 +225,7 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script. */ - public CompiledScript compile(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) { + public CompiledScript compile(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext, Map params) { if (script == null) { throw new IllegalArgumentException("The parameter script (Script) must not be null."); } @@ -252,14 +253,14 @@ public class ScriptService extends AbstractComponent implements Closeable { " operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are not supported"); } - return compileInternal(script, headersContext); + return compileInternal(script, headersContext, params); } /** * Compiles a script straight-away, or returns the previously compiled and cached script, * without checking if it can be executed based on settings. 
*/ - public CompiledScript compileInternal(Script script, HasContextAndHeaders context) { + public CompiledScript compileInternal(Script script, HasContextAndHeaders context, Map params) { if (script == null) { throw new IllegalArgumentException("The parameter script (Script) must not be null."); } @@ -277,7 +278,7 @@ public class ScriptService extends AbstractComponent implements Closeable { ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang); if (type == ScriptType.FILE) { - String cacheKey = getCacheKey(scriptEngineService, name, null); + CacheKey cacheKey = new CacheKey(scriptEngineService, name, null, params); //On disk scripts will be loaded into the staticCache by the listener CompiledScript compiledScript = staticCache.get(cacheKey); @@ -299,14 +300,14 @@ public class ScriptService extends AbstractComponent implements Closeable { code = getScriptFromIndex(indexedScript.lang, indexedScript.id, context); } - String cacheKey = getCacheKey(scriptEngineService, type == ScriptType.INLINE ? null : name, code); + CacheKey cacheKey = new CacheKey(scriptEngineService, type == ScriptType.INLINE ? 
null : name, code, params); CompiledScript compiledScript = cache.get(cacheKey); if (compiledScript == null) { //Either an un-cached inline script or indexed script //If the script type is inline the name will be the same as the code for identification in exceptions try { - compiledScript = new CompiledScript(type, name, lang, scriptEngineService.compile(code)); + compiledScript = new CompiledScript(type, name, lang, scriptEngineService.compile(code, params)); } catch (Exception exception) { throw new ScriptException("Failed to compile " + type + " script [" + name + "] using lang [" + lang + "]", exception); } @@ -364,7 +365,7 @@ public class ScriptService extends AbstractComponent implements Closeable { //we don't know yet what the script will be used for, but if all of the operations for this lang with //indexed scripts are disabled, it makes no sense to even compile it. if (isAnyScriptContextEnabled(scriptLang, scriptEngineService, ScriptType.INDEXED)) { - Object compiled = scriptEngineService.compile(template.getScript()); + Object compiled = scriptEngineService.compile(template.getScript(), Collections.emptyMap()); if (compiled == null) { throw new IllegalArgumentException("Unable to parse [" + template.getScript() + "] lang [" + scriptLang + "] (ScriptService.compile returned null)"); @@ -436,8 +437,8 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided script */ - public ExecutableScript executable(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) { - return executable(compile(script, scriptContext, headersContext), script.getParams()); + public ExecutableScript executable(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext, Map params) { + return executable(compile(script, scriptContext, headersContext, params), script.getParams()); } /** @@ -450,8 +451,8 @@ public class ScriptService extends 
AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided search script */ - public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext) { - CompiledScript compiledScript = compile(script, scriptContext, SearchContext.current()); + public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext, Map params) { + CompiledScript compiledScript = compile(script, scriptContext, SearchContext.current(), params); return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript, lookup, script.getParams()); } @@ -491,9 +492,9 @@ public class ScriptService extends AbstractComponent implements Closeable { * {@code ScriptEngineService}'s {@code scriptRemoved} method when the * script has been removed from the cache */ - private class ScriptCacheRemovalListener implements RemovalListener { + private class ScriptCacheRemovalListener implements RemovalListener { @Override - public void onRemoval(RemovalNotification notification) { + public void onRemoval(RemovalNotification notification) { scriptMetrics.onCacheEviction(); for (ScriptEngineService service : scriptEngines) { try { @@ -539,8 +540,8 @@ public class ScriptService extends AbstractComponent implements Closeable { logger.info("compiling script file [{}]", file.toAbsolutePath()); try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) { String script = Streams.copyToString(reader); - String cacheKey = getCacheKey(engineService, scriptNameExt.v1(), null); - staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.types()[0], engineService.compile(script))); + CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap()); + staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.types()[0], engineService.compile(script, 
Collections.emptyMap()))); scriptMetrics.onCompilation(); } } else { @@ -565,7 +566,7 @@ public class ScriptService extends AbstractComponent implements Closeable { ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); assert engineService != null; logger.info("removing script file [{}]", file.toAbsolutePath()); - staticCache.remove(getCacheKey(engineService, scriptNameExt.v1(), null)); + staticCache.remove(new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap())); } } @@ -625,11 +626,44 @@ public class ScriptService extends AbstractComponent implements Closeable { } } - private static String getCacheKey(ScriptEngineService scriptEngineService, String name, String code) { - String lang = scriptEngineService.types()[0]; - return lang + ":" + (name != null ? ":" + name : "") + (code != null ? ":" + code : ""); + private static final class CacheKey { + final String lang; + final String name; + final String code; + final Map params; + + private CacheKey(final ScriptEngineService service, final String name, final String code, final Map params) { + this.lang = service.types()[0]; + this.name = name; + this.code = code; + this.params = params; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + CacheKey cacheKey = (CacheKey)o; + + if (!lang.equals(cacheKey.lang)) return false; + if (name != null ? !name.equals(cacheKey.name) : cacheKey.name != null) return false; + if (code != null ? !code.equals(cacheKey.code) : cacheKey.code != null) return false; + return params.equals(cacheKey.params); + + } + + @Override + public int hashCode() { + int result = lang.hashCode(); + result = 31 * result + (name != null ? name.hashCode() : 0); + result = 31 * result + (code != null ? 
code.hashCode() : 0); + result = 31 * result + params.hashCode(); + return result; + } } + private static class IndexedScript { private final String lang; private final String id; diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index eb8414bb32e..00939a74cf8 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -91,6 +91,7 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -560,7 +561,7 @@ public class SearchService extends AbstractLifecycleComponent imp context.scrollContext().scroll = request.scroll(); } if (request.template() != null) { - ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, context); + ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, context, Collections.emptyMap()); BytesReference run = (BytesReference) executable.run(); try (XContentParser parser = XContentFactory.xContent(run).createParser(run)) { QueryParseContext queryParseContext = new QueryParseContext(indicesService.getIndicesQueryRegistry()); @@ -835,7 +836,7 @@ public class SearchService extends AbstractLifecycleComponent imp } if (source.scriptFields() != null) { for (org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField field : source.scriptFields()) { - SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH); + SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH, Collections.emptyMap()); context.scriptFields().add(new 
ScriptField(field.fieldName(), searchScript, field.ignoreFailure())); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java index 046ca717b9f..b6d1d56d07b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -82,7 +83,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { @Override public void initialize(InternalAggregation.ReduceContext context) { - searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS, context); + searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS, context, Collections.emptyMap()); searchScript.setNextVar("_subset_freq", subsetDfHolder); searchScript.setNextVar("_subset_size", subsetSizeHolder); searchScript.setNextVar("_superset_freq", supersetDfHolder); @@ -170,7 +171,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { } ExecutableScript searchScript; try { - searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, context); + searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, context, Collections.emptyMap()); } catch (Exception e) { throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. 
the script [{}] could not be loaded", e, script, heuristicName); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index d39a0335ac3..00c6b6b49bb 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -91,7 +92,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement vars.putAll(firstAggregation.reduceScript.getParams()); } CompiledScript compiledScript = reduceContext.scriptService().compile(firstAggregation.reduceScript, - ScriptContext.Standard.AGGS, reduceContext); + ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap()); ExecutableScript script = reduceContext.scriptService().executable(compiledScript, vars); aggregation = script.run(); } else { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index 2c1caaa5241..6603c6289b2 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import 
java.util.Map; @@ -58,11 +59,11 @@ public class ScriptedMetricAggregator extends MetricsAggregator { this.params = params; ScriptService scriptService = context.searchContext().scriptService(); if (initScript != null) { - scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext()).run(); + scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext(), Collections.emptyMap()).run(); } - this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS); + this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS, Collections.emptyMap()); if (combineScript != null) { - this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, context.searchContext()); + this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, context.searchContext(), Collections.emptyMap()); } else { this.combineScript = null; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java index 789f8c961a3..e5ccbf6971a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java @@ -90,7 +90,7 @@ public class BucketScriptPipelineAggregator extends PipelineAggregator { InternalMultiBucketAggregation originalAgg = (InternalMultiBucketAggregation) aggregation; List buckets = originalAgg.getBuckets(); - CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext); + CompiledScript compiledScript = reduceContext.scriptService().compile(script, 
ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap()); List newBuckets = new ArrayList<>(); for (Bucket bucket : buckets) { Map vars = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java index 669a223b215..edc3b4e87ce 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -88,7 +89,7 @@ public class BucketSelectorPipelineAggregator extends PipelineAggregator { InternalMultiBucketAggregation originalAgg = (InternalMultiBucketAggregation) aggregation; List buckets = originalAgg.getBuckets(); - CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext); + CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap()); List newBuckets = new ArrayList<>(); for (Bucket bucket : buckets) { Map vars = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java index 506c9d16d7c..a9dcc77ee9f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java @@ -43,6 +43,7 @@ import 
org.elasticsearch.search.internal.SearchContext; import org.joda.time.DateTimeZone; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -227,7 +228,7 @@ public class ValuesSourceParser { } private SearchScript createScript() { - return input.script == null ? null : context.scriptService().search(context.lookup(), input.script, ScriptContext.Standard.AGGS); + return input.script == null ? null : context.scriptService().search(context.lookup(), input.script, ScriptContext.Standard.AGGS, Collections.emptyMap()); } private static ValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType) { diff --git a/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java b/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java index 6dbdcbd589a..de1703b5c98 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java @@ -30,6 +30,7 @@ import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.internal.SearchContext; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -97,9 +98,9 @@ public class ScriptFieldsParseElement implements SearchParseElement { throw new SearchParseException(context, "must specify a script in script fields", parser.getTokenLocation()); } - SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH); + SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); context.scriptFields().add(new ScriptFieldsContext.ScriptField(fieldName, searchScript, ignoreException)); } } } -} \ No newline at end of file +} diff --git 
a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index c465eaf6efb..e4fe2c08f75 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -50,6 +50,7 @@ import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -130,7 +131,7 @@ public class ScriptSortParser implements SortParser { if (type == null) { throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation()); } - final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH); + final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) { throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation()); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java index 4bbdaf9c49e..9b083a91788 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.suggest.SuggestionSearchContext; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCandidateGenerator; import java.io.IOException; +import java.util.Collections; public final class PhraseSuggestParser implements 
SuggestContextParser { @@ -143,7 +144,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { } Template template = Template.parse(parser, parseFieldMatcher); CompiledScript compiledScript = suggester.scriptService().compile(template, ScriptContext.Standard.SEARCH, - headersContext); + headersContext, Collections.emptyMap()); suggestion.setCollateQueryScript(compiledScript); } else if ("params".equals(fieldName)) { suggestion.setCollateScriptParams(parser.map()); diff --git a/core/src/test/java/org/elasticsearch/script/FileScriptTests.java b/core/src/test/java/org/elasticsearch/script/FileScriptTests.java index fc888c79a8c..987aef90bc3 100644 --- a/core/src/test/java/org/elasticsearch/script/FileScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/FileScriptTests.java @@ -54,7 +54,7 @@ public class FileScriptTests extends ESTestCase { .put("script.engine." + MockScriptEngine.NAME + ".file.aggs", false).build(); ScriptService scriptService = makeScriptService(settings); Script script = new Script("script1", ScriptService.ScriptType.FILE, MockScriptEngine.NAME, null); - assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders)); + assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap())); } public void testAllOpsDisabled() throws Exception { @@ -68,7 +68,7 @@ public class FileScriptTests extends ESTestCase { Script script = new Script("script1", ScriptService.ScriptType.FILE, MockScriptEngine.NAME, null); for (ScriptContext context : ScriptContext.Standard.values()) { try { - scriptService.compile(script, context, contextAndHeaders); + scriptService.compile(script, context, contextAndHeaders, Collections.emptyMap()); fail(context.getKey() + " script should have been rejected"); } catch(Exception e) { assertTrue(e.getMessage(), e.getMessage().contains("scripts of type [file], operation [" + context.getKey() + "] and lang [" + 
MockScriptEngine.NAME + "] are disabled")); diff --git a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java index 02fad319846..47adeabe02f 100644 --- a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.watcher.ResourceWatcherService; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -62,7 +63,7 @@ public class NativeScriptTests extends ESTestCase { ScriptService scriptService = injector.getInstance(ScriptService.class); ExecutableScript executable = scriptService.executable(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), - ScriptContext.Standard.SEARCH, contextAndHeaders); + ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap()); assertThat(executable.run().toString(), equalTo("test")); terminate(injector.getInstance(ThreadPool.class)); } @@ -88,7 +89,7 @@ public class NativeScriptTests extends ESTestCase { for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) { assertThat(scriptService.compile(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), scriptContext, - contextAndHeaders), notNullValue()); + contextAndHeaders, Collections.emptyMap()), notNullValue()); } } @@ -110,4 +111,4 @@ public class NativeScriptTests extends ESTestCase { return "test"; } } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java b/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java index 0edaedbb28e..019eb7c74a0 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java @@ -58,7 +58,7 @@ public class 
ScriptContextTests extends ESTestCase { for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { try { Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); - scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op"), contextAndHeaders); + scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op"), contextAndHeaders, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (ScriptException e) { assertThat(e.getMessage(), containsString("scripts of type [" + scriptType + "], operation [" + PLUGIN_NAME + "_custom_globally_disabled_op] and lang [" + MockScriptEngine.NAME + "] are disabled")); @@ -71,16 +71,16 @@ public class ScriptContextTests extends ESTestCase { ScriptService scriptService = makeScriptService(); Script script = new Script("1", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, null); try { - scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"), contextAndHeaders); + scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"), contextAndHeaders, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (ScriptException e) { assertTrue(e.getMessage(), e.getMessage().contains("scripts of type [inline], operation [" + PLUGIN_NAME + "_custom_exp_disabled_op] and lang [" + MockScriptEngine.NAME + "] are disabled")); } // still works for other script contexts - assertNotNull(scriptService.compile(script, ScriptContext.Standard.AGGS, contextAndHeaders)); - assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders)); - assertNotNull(scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_op"), contextAndHeaders)); + assertNotNull(scriptService.compile(script, ScriptContext.Standard.AGGS, contextAndHeaders, Collections.emptyMap())); + 
assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap())); + assertNotNull(scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_op"), contextAndHeaders, Collections.emptyMap())); } public void testUnknownPluginScriptContext() throws Exception { @@ -89,7 +89,7 @@ public class ScriptContextTests extends ESTestCase { for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { try { Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); - scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "unknown"), contextAndHeaders); + scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "unknown"), contextAndHeaders, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage(), e.getMessage().contains("script context [" + PLUGIN_NAME + "_unknown] not supported")); @@ -109,7 +109,7 @@ public class ScriptContextTests extends ESTestCase { for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { try { Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); - scriptService.compile(script, context, contextAndHeaders); + scriptService.compile(script, context, contextAndHeaders, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage(), e.getMessage().contains("script context [test] not supported")); diff --git a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java index 3e476d2bebb..0afd72dab2b 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java @@ -252,7 +252,7 @@ public class ScriptModesTests extends ESTestCase { } @Override - public Object 
compile(String script) { + public Object compile(String script, Map params) { return null; } diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index e81b0894eca..ab325e9e0c9 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -132,7 +132,7 @@ public class ScriptServiceTests extends ESTestCase { logger.info("--> verify that file with extension was correctly processed"); CompiledScript compiledScript = scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), - ScriptContext.Standard.SEARCH, contextAndHeaders); + ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file")); logger.info("--> delete both files"); @@ -143,7 +143,7 @@ public class ScriptServiceTests extends ESTestCase { logger.info("--> verify that file with extension was correctly removed"); try { scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH, - contextAndHeaders); + contextAndHeaders, Collections.emptyMap()); fail("the script test_script should no longer exist"); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage(), containsString("Unable to find on disk file script [test_script] using lang [test]")); @@ -154,9 +154,9 @@ public class ScriptServiceTests extends ESTestCase { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); CompiledScript compiledScript1 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); CompiledScript compiledScript2 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), 
- randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @@ -164,9 +164,9 @@ public class ScriptServiceTests extends ESTestCase { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); CompiledScript compiledScript1 = scriptService.compile(new Script("script", ScriptType.INLINE, "test", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); CompiledScript compiledScript2 = scriptService.compile(new Script("script", ScriptType.INLINE, "test2", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @@ -175,9 +175,9 @@ public class ScriptServiceTests extends ESTestCase { buildScriptService(Settings.EMPTY); createFileScripts("test"); CompiledScript compiledScript1 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); CompiledScript compiledScript2 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test2", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @@ -338,7 +338,7 @@ public class ScriptServiceTests extends ESTestCase { for (String type : scriptEngineService.types()) { try { scriptService.compile(new Script("test", randomFrom(ScriptType.values()), type, null), new ScriptContext.Plugin( - pluginName, unknownContext), contextAndHeaders); + pluginName, unknownContext), contextAndHeaders, 
Collections.emptyMap()); fail("script compilation should have been rejected"); } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported")); @@ -349,20 +349,20 @@ public class ScriptServiceTests extends ESTestCase { public void testCompileCountedInCompilationStats() throws IOException { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); - scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } public void testExecutableCountedInCompilationStats() throws IOException { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } public void testSearchCountedInCompilationStats() throws IOException { buildScriptService(Settings.EMPTY); - scriptService.search(null, new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts)); + scriptService.search(null, new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } @@ -372,7 +372,7 @@ public class ScriptServiceTests extends ESTestCase { int numberOfCompilations = randomIntBetween(1, 1024); for (int i = 0; i < numberOfCompilations; i++) { scriptService - .compile(new Script(i + " + " + i, 
ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + .compile(new Script(i + " + " + i, ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); } assertEquals(numberOfCompilations, scriptService.stats().getCompilations()); } @@ -382,8 +382,8 @@ public class ScriptServiceTests extends ESTestCase { Settings.Builder builder = Settings.builder(); builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, 1); buildScriptService(builder.build()); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); + scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } @@ -391,14 +391,14 @@ public class ScriptServiceTests extends ESTestCase { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); createFileScripts("test"); - scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } public void testIndexedScriptCountedInCompilationStats() throws IOException { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); - scriptService.compile(new Script("script", ScriptType.INDEXED, "test", null), randomFrom(scriptContexts), 
contextAndHeaders); + scriptService.compile(new Script("script", ScriptType.INDEXED, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } @@ -407,8 +407,8 @@ public class ScriptServiceTests extends ESTestCase { Settings.Builder builder = Settings.builder(); builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, 1); buildScriptService(builder.build()); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); - scriptService.executable(new Script("2+2", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); + scriptService.executable(new Script("2+2", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(2L, scriptService.stats().getCompilations()); assertEquals(1L, scriptService.stats().getCacheEvictions()); } @@ -424,7 +424,7 @@ public class ScriptServiceTests extends ESTestCase { private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext, HasContextAndHeaders contextAndHeaders) { try { - scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders); + scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders, Collections.emptyMap()); fail("compile should have been rejected for lang [" + lang + "], script_type [" + scriptType + "], scripted_op [" + scriptContext + "]"); } catch(ScriptException e) { //all good @@ -433,7 +433,7 @@ public class ScriptServiceTests extends ESTestCase { private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext, HasContextAndHeaders contextAndHeaders) { 
- assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders), notNullValue()); + assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders, Collections.emptyMap()), notNullValue()); } public static class TestEngineService implements ScriptEngineService { @@ -454,7 +454,7 @@ public class ScriptServiceTests extends ESTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return "compiled_" + script; } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index 9a1d498ad6b..97cd659a1d9 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -1429,7 +1429,7 @@ public class DateHistogramIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return script; } @@ -1555,7 +1555,7 @@ public class DateHistogramIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return script; } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java index ac146706eb5..db02d6ccb03 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java @@ -364,7 +364,7 @@ public class AvgIT extends AbstractNumericTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return script; } @@ -500,7 +500,7 @@ public class AvgIT extends 
AbstractNumericTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return script; } @@ -585,4 +585,4 @@ public class AvgIT extends AbstractNumericTestCase { public void scriptRemoved(CompiledScript script) { } } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index d87de000108..2c27bde57dc 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -359,7 +359,7 @@ public class SumIT extends AbstractNumericTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return script; } @@ -497,7 +497,7 @@ public class SumIT extends AbstractNumericTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return script; } @@ -583,4 +583,4 @@ public class SumIT extends AbstractNumericTestCase { public void scriptRemoved(CompiledScript script) { } } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java index fde7256ad01..c4611546493 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java @@ -244,7 +244,7 @@ public class ValueCountIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return script; } @@ -330,4 +330,4 @@ public class ValueCountIT extends ESIntegTestCase { public void scriptRemoved(CompiledScript script) { } } -} \ No newline at end of file +} diff --git 
a/core/src/test/java/org/elasticsearch/update/UpdateIT.java b/core/src/test/java/org/elasticsearch/update/UpdateIT.java index a789bb48774..09887d83541 100644 --- a/core/src/test/java/org/elasticsearch/update/UpdateIT.java +++ b/core/src/test/java/org/elasticsearch/update/UpdateIT.java @@ -120,7 +120,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return new Object(); // unused } @@ -218,7 +218,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return script; } @@ -309,7 +309,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return new Object(); // unused } @@ -400,7 +400,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return new Object(); // unused } diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java index a7f93925119..cf6017a32ca 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java @@ -95,7 +95,7 @@ public class ExpressionScriptEngineService extends AbstractComponent implements } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { // classloader created here final SecurityManager sm = System.getSecurityManager(); if (sm != null) { diff --git 
a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java index b05b9630a14..198558381d3 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java @@ -33,23 +33,23 @@ public class ExpressionTests extends ESSingleNodeTestCase { public void testNeedsScores() { IndexService index = createIndex("test", Settings.EMPTY, "type", "d", "type=double"); - + ExpressionScriptEngineService service = new ExpressionScriptEngineService(Settings.EMPTY); SearchLookup lookup = new SearchLookup(index.mapperService(), index.fieldData(), null); - Object compiled = service.compile("1.2"); + Object compiled = service.compile("1.2", Collections.emptyMap()); SearchScript ss = service.search(new CompiledScript(ScriptType.INLINE, "randomName", "expression", compiled), lookup, Collections.emptyMap()); assertFalse(ss.needsScores()); - compiled = service.compile("doc['d'].value"); + compiled = service.compile("doc['d'].value", Collections.emptyMap()); ss = service.search(new CompiledScript(ScriptType.INLINE, "randomName", "expression", compiled), lookup, Collections.emptyMap()); assertFalse(ss.needsScores()); - compiled = service.compile("1/_score"); + compiled = service.compile("1/_score", Collections.emptyMap()); ss = service.search(new CompiledScript(ScriptType.INLINE, "randomName", "expression", compiled), lookup, Collections.emptyMap()); assertTrue(ss.needsScores()); - compiled = service.compile("doc['d'].value * _score"); + compiled = service.compile("doc['d'].value * _score", Collections.emptyMap()); ss = service.search(new CompiledScript(ScriptType.INLINE, "randomName", "expression", compiled), lookup, Collections.emptyMap()); assertTrue(ss.needsScores()); } diff --git 
a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java index 85f57694ce6..1ce5a2ab761 100644 --- a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java +++ b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java @@ -165,7 +165,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { try { // we reuse classloader, so do a security check just in case. SecurityManager sm = System.getSecurityManager(); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java index 5f91631c021..8d9279ca0dd 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java @@ -99,7 +99,7 @@ public class GroovySecurityTests extends ESTestCase { // filtered directly by our classloader assertFailure("getClass().getClassLoader().loadClass(\"java.lang.Runtime\").availableProcessors()", PrivilegedActionException.class); // unfortunately, we have access to other classloaders (due to indy mechanism needing getClassLoader permission) - // but we can't do much with them directly at least. + // but we can't do much with them directly at least. assertFailure("myobject.getClass().getClassLoader().loadClass(\"java.lang.Runtime\").availableProcessors()", SecurityException.class); assertFailure("d = new DateTime(); d.getClass().getDeclaredMethod(\"year\").setAccessible(true)", SecurityException.class); assertFailure("d = new DateTime(); d.\"${'get' + 'Class'}\"()." 
+ @@ -133,9 +133,9 @@ public class GroovySecurityTests extends ESTestCase { vars.put("myarray", Arrays.asList("foo")); vars.put("myobject", new MyObject()); - se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "js", se.compile(script)), vars).run(); + se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "js", se.compile(script, Collections.emptyMap())), vars).run(); } - + public static class MyObject { public int getPrimitive() { return 0; } public Object getObject() { return "value"; } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java index 93172056071..78fc6571f12 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java @@ -85,7 +85,7 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc * @return a compiled template object for later execution. * */ @Override - public Object compile(String template) { + public Object compile(String template, Map params) { /** Factory to generate Mustache objects from. 
*/ return (new JsonEscapingMustacheFactory()).compile(new FastStringReader(template), "query-template"); } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java index ce29bf246be..8e8c8981493 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java @@ -28,6 +28,7 @@ import org.junit.Before; import java.io.IOException; import java.io.StringWriter; import java.nio.charset.Charset; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -52,7 +53,7 @@ public class MustacheScriptEngineTests extends ESTestCase { + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}" + "}}, \"negative_boost\": {{boost_val}} } }}"; Map vars = new HashMap<>(); vars.put("boost_val", "0.3"); - BytesReference o = (BytesReference) qe.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", qe.compile(template)), vars).run(); + BytesReference o = (BytesReference) qe.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", qe.compile(template, Collections.emptyMap())), vars).run(); assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}}," + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}}}, \"negative_boost\": 0.3 } }}", new String(o.toBytes(), Charset.forName("UTF-8"))); @@ -63,7 +64,7 @@ public class MustacheScriptEngineTests extends ESTestCase { Map vars = new HashMap<>(); vars.put("boost_val", "0.3"); vars.put("body_val", "\"quick brown\""); - BytesReference o = (BytesReference) qe.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", qe.compile(template)), vars).run(); + BytesReference o = (BytesReference) 
qe.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", qe.compile(template, Collections.emptyMap())), vars).run(); assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}}," + "\"negative\": {\"term\": {\"body\": {\"value\": \"\\\"quick brown\\\"\"}}}, \"negative_boost\": 0.3 } }}", new String(o.toBytes(), Charset.forName("UTF-8"))); diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java index 33a4e55801b..825a8d358d9 100644 --- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java +++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java @@ -62,7 +62,7 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements // one time initialization of rhino security manager integration private static final CodeSource DOMAIN; private static final int OPTIMIZATION_LEVEL = 1; - + static { try { DOMAIN = new CodeSource(new URL("file:" + BootstrapInfo.UNTRUSTED_CODEBASE), (Certificate[]) null); @@ -110,7 +110,7 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements if (securityDomain != DOMAIN) { throw new SecurityException("illegal securityDomain: " + securityDomain); } - + return super.createClassLoader(parent, securityDomain); } }); @@ -157,7 +157,7 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { Context ctx = Context.enter(); try { return ctx.compileString(script, generateScriptName(), 1, DOMAIN); diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java 
b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java index fe9cc324f1c..9d8357bb582 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java @@ -29,6 +29,7 @@ import org.junit.After; import org.junit.Before; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -54,7 +55,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { public void testSimpleEquation() { Map vars = new HashMap(); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "js", se.compile("1 + 2")), vars).run(); + Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "js", se.compile("1 + 2", Collections.emptyMap())), vars).run(); assertThat(((Number) o).intValue(), equalTo(3)); } @@ -64,20 +65,20 @@ public class JavaScriptScriptEngineTests extends ESTestCase { Map obj2 = MapBuilder.newMapBuilder().put("prop2", "value2").map(); Map obj1 = MapBuilder.newMapBuilder().put("prop1", "value1").put("obj2", obj2).put("l", Arrays.asList("2", "1")).map(); vars.put("obj1", obj1); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile("obj1")), vars).run(); + Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile("obj1", Collections.emptyMap())), vars).run(); assertThat(o, instanceOf(Map.class)); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile("obj1.l[0]")), vars).run(); + o = 
se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile("obj1.l[0]", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("2")); } public void testJavaScriptObjectToMap() { Map vars = new HashMap(); Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptObjectToMap", "js", - se.compile("var obj1 = {}; obj1.prop1 = 'value1'; obj1.obj2 = {}; obj1.obj2.prop2 = 'value2'; obj1")), vars).run(); + se.compile("var obj1 = {}; obj1.prop1 = 'value1'; obj1.obj2 = {}; obj1.obj2.prop2 = 'value2'; obj1", Collections.emptyMap())), vars).run(); Map obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); @@ -92,7 +93,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { vars.put("ctx", ctx); ExecutableScript executable = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptObjectMapInter", "js", - se.compile("ctx.obj2 = {}; ctx.obj2.prop2 = 'value2'; ctx.obj1.prop1 = 'uvalue1'")), vars); + se.compile("ctx.obj2 = {}; ctx.obj2.prop2 = 'value2'; ctx.obj1.prop1 = 'uvalue1'", Collections.emptyMap())), vars); executable.run(); ctx = (Map) executable.unwrap(vars.get("ctx")); assertThat(ctx.containsKey("obj1"), equalTo(true)); @@ -106,7 +107,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { Map doc = new HashMap(); ctx.put("doc", doc); - Object compiled = se.compile("ctx.doc.field1 = ['value1', 'value2']"); + Object compiled = se.compile("ctx.doc.field1 = ['value1', 'value2']", Collections.emptyMap()); ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptInnerArrayCreation", "js", compiled), new HashMap()); script.setNextVar("ctx", ctx); @@ -124,21 +125,21 @@ public class JavaScriptScriptEngineTests extends ESTestCase { vars.put("l", Arrays.asList("1", "2", "3", obj1)); Object o = 
se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", - se.compile("l.length")), vars).run(); + se.compile("l.length", Collections.emptyMap())), vars).run(); assertThat(((Number) o).intValue(), equalTo(4)); o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", - se.compile("l[0]")), vars).run(); + se.compile("l[0]", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("1")); o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", - se.compile("l[3]")), vars).run(); + se.compile("l[3]", Collections.emptyMap())), vars).run(); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", - se.compile("l[3].prop1")), vars).run(); + se.compile("l[3].prop1", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("value1")); } @@ -146,7 +147,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { Map vars = new HashMap(); Map ctx = new HashMap(); vars.put("ctx", ctx); - Object compiledScript = se.compile("ctx.value"); + Object compiledScript = se.compile("ctx.value", Collections.emptyMap()); ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution1", "js", compiledScript), vars); @@ -161,7 +162,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { public void testChangingVarsCrossExecution2() { Map vars = new HashMap(); - Object compiledScript = se.compile("value"); + Object compiledScript = se.compile("value", Collections.emptyMap()); ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution2", "js", compiledScript), vars); diff --git 
a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java index 2308e666c51..2aa6e13a99f 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -40,7 +41,7 @@ import static org.hamcrest.Matchers.equalTo; public class JavaScriptScriptMultiThreadedTests extends ESTestCase { public void testExecutableNoRuntimeParams() throws Exception { final JavaScriptScriptEngineService se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - final Object compiled = se.compile("x + y"); + final Object compiled = se.compile("x + y", Collections.emptyMap()); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[50]; @@ -82,7 +83,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { public void testExecutableWithRuntimeParams() throws Exception { final JavaScriptScriptEngineService se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - final Object compiled = se.compile("x + y"); + final Object compiled = se.compile("x + y", Collections.emptyMap()); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[50]; @@ -124,7 +125,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { public void testExecute() throws Exception { final JavaScriptScriptEngineService se = new 
JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - final Object compiled = se.compile("x + y"); + final Object compiled = se.compile("x + y", Collections.emptyMap()); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[50]; diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java index c6f9805f818..dccc36d1bf7 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.test.ESTestCase; import org.mozilla.javascript.EcmaError; import org.mozilla.javascript.WrappedException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -33,7 +34,7 @@ import java.util.Map; * Tests for the Javascript security permissions */ public class JavaScriptSecurityTests extends ESTestCase { - + private JavaScriptScriptEngineService se; @Override @@ -53,14 +54,14 @@ public class JavaScriptSecurityTests extends ESTestCase { /** runs a script */ private void doTest(String script) { Map vars = new HashMap(); - se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "js", se.compile(script)), vars).run(); + se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "js", se.compile(script, Collections.emptyMap())), vars).run(); } - + /** asserts that a script runs without exception */ private void assertSuccess(String script) { doTest(script); } - + /** assert that a security exception is hit */ private void assertFailure(String script, Class exceptionClass) { try { @@ -78,13 +79,13 @@ public class JavaScriptSecurityTests extends ESTestCase { } } } - + /** Test some javascripts that are ok */ public void testOK() { 
assertSuccess("1 + 2"); assertSuccess("Math.cos(Math.PI)"); } - + /** Test some javascripts that should hit security exception */ public void testNotOK() throws Exception { // sanity check :) diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java index bb7eb31c85d..3445c116057 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java @@ -25,6 +25,7 @@ import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptService; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -35,7 +36,7 @@ public class SimpleBench { public static void main(String[] args) { JavaScriptScriptEngineService se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - Object compiled = se.compile("x + y"); + Object compiled = se.compile("x + y", Collections.emptyMap()); CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled); Map vars = new HashMap(); diff --git a/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/PlanAScriptEngineService.java b/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/PlanAScriptEngineService.java index 6b3cd834715..7795f74700b 100644 --- a/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/PlanAScriptEngineService.java +++ b/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/PlanAScriptEngineService.java @@ -28,6 +28,7 @@ import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.ScriptEngineService; +import 
org.elasticsearch.script.ScriptException; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; @@ -37,15 +38,16 @@ import java.security.AccessController; import java.security.Permissions; import java.security.PrivilegedAction; import java.security.ProtectionDomain; +import java.util.HashMap; import java.util.Map; public class PlanAScriptEngineService extends AbstractComponent implements ScriptEngineService { public static final String NAME = "plan-a"; - // TODO: this should really be per-script since scripts do so many different things? - private static final CompilerSettings compilerSettings = new CompilerSettings(); - - public static final String NUMERIC_OVERFLOW = "plan-a.numeric_overflow"; + // default settings, used unless otherwise specified + private static final CompilerSettings DEFAULT_COMPILER_SETTINGS = new CompilerSettings(); + + public static final String NUMERIC_OVERFLOW = "numeric_overflow"; // TODO: how should custom definitions be specified? private Definition definition = null; @@ -53,7 +55,6 @@ public class PlanAScriptEngineService extends AbstractComponent implements Scrip @Inject public PlanAScriptEngineService(Settings settings) { super(settings); - compilerSettings.setNumericOverflow(settings.getAsBoolean(NUMERIC_OVERFLOW, compilerSettings.getNumericOverflow())); } public void setDefinition(final Definition definition) { @@ -86,7 +87,23 @@ public class PlanAScriptEngineService extends AbstractComponent implements Scrip } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { + final CompilerSettings compilerSettings; + if (params.isEmpty()) { + compilerSettings = DEFAULT_COMPILER_SETTINGS; + } else { + // custom settings + compilerSettings = new CompilerSettings(); + Map clone = new HashMap<>(params); + String value = clone.remove(NUMERIC_OVERFLOW); + if (value != null) { + // TODO: can we get a real boolean parser in here? 
+ compilerSettings.setNumericOverflow(Boolean.parseBoolean(value)); + } + if (!clone.isEmpty()) { + throw new IllegalArgumentException("Unrecognized compile-time parameter(s): " + clone); + } + } // check we ourselves are not being called by unprivileged code SecurityManager sm = System.getSecurityManager(); if (sm != null) { diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowDisabledTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowDisabledTests.java index 94beac0c58c..4603a669df2 100644 --- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowDisabledTests.java +++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowDisabledTests.java @@ -19,17 +19,16 @@ package org.elasticsearch.plan.a; -import org.elasticsearch.common.settings.Settings; +import java.util.Collections; +import java.util.Map; /** Tests floating point overflow with numeric overflow disabled */ public class FloatOverflowDisabledTests extends ScriptTestCase { + /** wire overflow to false for all tests */ @Override - protected Settings getSettings() { - Settings.Builder builder = Settings.builder(); - builder.put(super.getSettings()); - builder.put(PlanAScriptEngineService.NUMERIC_OVERFLOW, false); - return builder.build(); + public Object exec(String script, Map vars) { + return exec(script, vars, Collections.singletonMap(PlanAScriptEngineService.NUMERIC_OVERFLOW, "false")); } public void testAssignmentAdditionOverflow() { diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowEnabledTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowEnabledTests.java index ff1c315628f..02a738de71e 100644 --- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowEnabledTests.java +++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowEnabledTests.java @@ -19,17 +19,16 @@ package 
org.elasticsearch.plan.a; -import org.elasticsearch.common.settings.Settings; +import java.util.Collections; +import java.util.Map; /** Tests floating point overflow with numeric overflow enabled */ public class FloatOverflowEnabledTests extends ScriptTestCase { + /** wire overflow to true for all tests */ @Override - protected Settings getSettings() { - Settings.Builder builder = Settings.builder(); - builder.put(super.getSettings()); - builder.put(PlanAScriptEngineService.NUMERIC_OVERFLOW, true); - return builder.build(); + public Object exec(String script, Map vars) { + return exec(script, vars, Collections.singletonMap(PlanAScriptEngineService.NUMERIC_OVERFLOW, "true")); } public void testAssignmentAdditionOverflow() { diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowDisabledTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowDisabledTests.java index 279ea0616d9..dbffb11f0d0 100644 --- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowDisabledTests.java +++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowDisabledTests.java @@ -19,17 +19,16 @@ package org.elasticsearch.plan.a; -import org.elasticsearch.common.settings.Settings; +import java.util.Collections; +import java.util.Map; /** Tests integer overflow with numeric overflow disabled */ public class IntegerOverflowDisabledTests extends ScriptTestCase { + /** wire overflow to true for all tests */ @Override - protected Settings getSettings() { - Settings.Builder builder = Settings.builder(); - builder.put(super.getSettings()); - builder.put(PlanAScriptEngineService.NUMERIC_OVERFLOW, false); - return builder.build(); + public Object exec(String script, Map vars) { + return exec(script, vars, Collections.singletonMap(PlanAScriptEngineService.NUMERIC_OVERFLOW, "false")); } public void testAssignmentAdditionOverflow() { diff --git 
a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowEnabledTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowEnabledTests.java index 8abd2695915..cdab0e89fe6 100644 --- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowEnabledTests.java +++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowEnabledTests.java @@ -19,17 +19,16 @@ package org.elasticsearch.plan.a; -import org.elasticsearch.common.settings.Settings; +import java.util.Collections; +import java.util.Map; /** Tests integer overflow with numeric overflow enabled */ public class IntegerOverflowEnabledTests extends ScriptTestCase { + /** wire overflow to true for all tests */ @Override - protected Settings getSettings() { - Settings.Builder builder = Settings.builder(); - builder.put(super.getSettings()); - builder.put(PlanAScriptEngineService.NUMERIC_OVERFLOW, true); - return builder.build(); + public Object exec(String script, Map vars) { + return exec(script, vars, Collections.singletonMap(PlanAScriptEngineService.NUMERIC_OVERFLOW, "true")); } public void testAssignmentAdditionOverflow() { diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptEngineTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptEngineTests.java index d2bbe02a625..e5084392f99 100644 --- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptEngineTests.java +++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptEngineTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptService; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -78,7 +79,7 @@ public class ScriptEngineTests extends ScriptTestCase { Map ctx = new HashMap<>(); vars.put("ctx", ctx); - Object compiledScript = scriptEngine.compile("return 
((Map)input.get(\"ctx\")).get(\"value\");"); + Object compiledScript = scriptEngine.compile("return ((Map)input.get(\"ctx\")).get(\"value\");", Collections.emptyMap()); ExecutableScript script = scriptEngine.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution1", "plan-a", compiledScript), vars); @@ -93,7 +94,7 @@ public class ScriptEngineTests extends ScriptTestCase { public void testChangingVarsCrossExecution2() { Map vars = new HashMap<>(); - Object compiledScript = scriptEngine.compile("return input.get(\"value\");"); + Object compiledScript = scriptEngine.compile("return input.get(\"value\");", Collections.emptyMap()); ExecutableScript script = scriptEngine.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution2", "plan-a", compiledScript), vars); diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptTestCase.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptTestCase.java index 253e37183f3..5b4948036f3 100644 --- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptTestCase.java +++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptTestCase.java @@ -25,6 +25,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; +import java.util.Collections; import java.util.Map; /** @@ -34,17 +35,10 @@ import java.util.Map; */ public abstract class ScriptTestCase extends ESTestCase { protected PlanAScriptEngineService scriptEngine; - - /** Override to provide different compiler settings */ - protected Settings getSettings() { - Settings.Builder builder = Settings.builder(); - builder.put(PlanAScriptEngineService.NUMERIC_OVERFLOW, random().nextBoolean()); - return builder.build(); - } @Before public void setup() { - scriptEngine = new PlanAScriptEngineService(getSettings()); + scriptEngine = new PlanAScriptEngineService(Settings.EMPTY); } /** Compiles and 
returns the result of {@code script} */ @@ -54,7 +48,12 @@ public abstract class ScriptTestCase extends ESTestCase { /** Compiles and returns the result of {@code script} with access to {@code vars} */ public Object exec(String script, Map vars) { - Object object = scriptEngine.compile(script); + return exec(script, vars, Collections.singletonMap(PlanAScriptEngineService.NUMERIC_OVERFLOW, Boolean.toString(random().nextBoolean()))); + } + + /** Compiles and returns the result of {@code script} with access to {@code vars} and compile-time parameters */ + public Object exec(String script, Map vars, Map compileParams) { + Object object = scriptEngine.compile(script, compileParams); CompiledScript compiled = new CompiledScript(ScriptService.ScriptType.INLINE, getTestName(), "plan-a", object); return scriptEngine.executable(compiled, vars).run(); } diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/WhenThingsGoWrongTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/WhenThingsGoWrongTests.java index de2c1c9ea3e..277778e7e76 100644 --- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/WhenThingsGoWrongTests.java +++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/WhenThingsGoWrongTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.plan.a; +import java.util.Collections; + public class WhenThingsGoWrongTests extends ScriptTestCase { public void testNullPointer() { try { @@ -38,4 +40,13 @@ public class WhenThingsGoWrongTests extends ScriptTestCase { fail("should have hit cce"); } catch (ClassCastException expected) {} } + + public void testBogusParameter() { + try { + exec("return 5;", null, Collections.singletonMap("bogusParameterKey", "bogusParameterValue")); + fail("should have hit IAE"); + } catch (IllegalArgumentException expected) { + assertTrue(expected.getMessage().contains("Unrecognized compile-time parameter")); + } + } } diff --git 
a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java index 1930f530671..c4f109cc782 100644 --- a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java +++ b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java @@ -60,7 +60,7 @@ import org.python.util.PythonInterpreter; public class PythonScriptEngineService extends AbstractComponent implements ScriptEngineService { private final PythonInterpreter interp; - + @Inject public PythonScriptEngineService(Settings settings) { super(settings); @@ -110,7 +110,7 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { // classloader created here SecurityManager sm = System.getSecurityManager(); if (sm != null) { @@ -293,7 +293,7 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri if (value == null) { return null; } else if (value instanceof PyObject) { - // seems like this is enough, inner PyDictionary will do the conversion for us for example, so expose it directly + // seems like this is enough, inner PyDictionary will do the conversion for us for example, so expose it directly return ((PyObject) value).__tojava__(Object.class); } return value; diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java index e713bd67c92..a0bfab43c54 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java @@ -29,6 +29,7 @@ import org.junit.After; import 
org.junit.Before; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -53,7 +54,7 @@ public class PythonScriptEngineTests extends ESTestCase { public void testSimpleEquation() { Map vars = new HashMap(); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "python", se.compile("1 + 2")), vars).run(); + Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "python", se.compile("1 + 2", Collections.emptyMap())), vars).run(); assertThat(((Number) o).intValue(), equalTo(3)); } @@ -63,13 +64,13 @@ public class PythonScriptEngineTests extends ESTestCase { Map obj2 = MapBuilder.newMapBuilder().put("prop2", "value2").map(); Map obj1 = MapBuilder.newMapBuilder().put("prop1", "value1").put("obj2", obj2).put("l", Arrays.asList("2", "1")).map(); vars.put("obj1", obj1); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "python", se.compile("obj1")), vars).run(); + Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "python", se.compile("obj1", Collections.emptyMap())), vars).run(); assertThat(o, instanceOf(Map.class)); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "python", se.compile("obj1['l'][0]")), vars).run(); + o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "python", se.compile("obj1['l'][0]", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("2")); } @@ -82,7 +83,7 @@ public class PythonScriptEngineTests extends ESTestCase { vars.put("ctx", ctx); ExecutableScript executable = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testObjectInterMap", "python", - 
se.compile("ctx['obj2'] = { 'prop2' : 'value2' }; ctx['obj1']['prop1'] = 'uvalue1'")), vars); + se.compile("ctx['obj2'] = { 'prop2' : 'value2' }; ctx['obj1']['prop1'] = 'uvalue1'", Collections.emptyMap())), vars); executable.run(); ctx = (Map) executable.unwrap(vars.get("ctx")); assertThat(ctx.containsKey("obj1"), equalTo(true)); @@ -100,15 +101,15 @@ public class PythonScriptEngineTests extends ESTestCase { // Object o = se.execute(se.compile("l.length"), vars); // assertThat(((Number) o).intValue(), equalTo(4)); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[0]")), vars).run(); + Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[0]", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("1")); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[3]")), vars).run(); + o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[3]", Collections.emptyMap())), vars).run(); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[3]['prop1']")), vars).run(); + o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[3]['prop1']", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("value1")); } @@ -116,7 +117,7 @@ public class PythonScriptEngineTests extends ESTestCase { Map vars = new HashMap(); Map ctx = new HashMap(); vars.put("ctx", ctx); - Object compiledScript = se.compile("ctx['value']"); + Object compiledScript = se.compile("ctx['value']", Collections.emptyMap()); 
ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution1", "python", compiledScript), vars); ctx.put("value", 1); @@ -131,7 +132,7 @@ public class PythonScriptEngineTests extends ESTestCase { public void testChangingVarsCrossExecution2() { Map vars = new HashMap(); Map ctx = new HashMap(); - Object compiledScript = se.compile("value"); + Object compiledScript = se.compile("value", Collections.emptyMap()); ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution2", "python", compiledScript), vars); script.setNextVar("value", 1); diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java index 7b9663f6b6a..06d3da03ab8 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -41,7 +42,7 @@ public class PythonScriptMultiThreadedTests extends ESTestCase { public void testExecutableNoRuntimeParams() throws Exception { final PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - final Object compiled = se.compile("x + y"); + final Object compiled = se.compile("x + y", Collections.emptyMap()); final CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "python", compiled); final AtomicBoolean failed = new AtomicBoolean(); @@ -127,7 +128,7 
@@ public class PythonScriptMultiThreadedTests extends ESTestCase { public void testExecute() throws Exception { final PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - final Object compiled = se.compile("x + y"); + final Object compiled = se.compile("x + y", Collections.emptyMap()); final CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecute", "python", compiled); final AtomicBoolean failed = new AtomicBoolean(); diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java index e90ac503f13..22471129e82 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.test.ESTestCase; import org.python.core.PyException; import java.text.DecimalFormatSymbols; +import java.util.Collections; import java.util.HashMap; import java.util.Locale; import java.util.Map; @@ -34,7 +35,7 @@ import java.util.Map; * Tests for Python security permissions */ public class PythonSecurityTests extends ESTestCase { - + private PythonScriptEngineService se; @Override @@ -54,14 +55,14 @@ public class PythonSecurityTests extends ESTestCase { /** runs a script */ private void doTest(String script) { Map vars = new HashMap(); - se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "python", se.compile(script)), vars).run(); + se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "python", se.compile(script, Collections.emptyMap())), vars).run(); } - + /** asserts that a script runs without exception */ private void assertSuccess(String script) { doTest(script); } - + /** assert that a security exception is hit */ private void assertFailure(String script) 
{ try { @@ -76,13 +77,13 @@ public class PythonSecurityTests extends ESTestCase { } } } - + /** Test some py scripts that are ok */ public void testOK() { assertSuccess("1 + 2"); assertSuccess("from java.lang import Math\nMath.cos(0)"); } - + /** Test some py scripts that should hit security exception */ public void testNotOK() { // sanity check :) @@ -93,7 +94,7 @@ public class PythonSecurityTests extends ESTestCase { // no files assertFailure("from java.io import File\nFile.createTempFile(\"test\", \"tmp\")"); } - + /** Test again from a new thread, python has complex threadlocal configuration */ public void testNotOKFromSeparateThread() throws Exception { Thread t = new Thread() { diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java index 60e792c34b5..d9559aef16c 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java @@ -25,6 +25,7 @@ import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptService; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -35,7 +36,7 @@ public class SimpleBench { public static void main(String[] args) { PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - Object compiled = se.compile("x + y"); + Object compiled = se.compile("x + y", Collections.emptyMap()); CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "SimpleBench", "python", compiled); diff --git a/test-framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test-framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index bfd40900456..aec90aa50d4 100644 --- 
a/test-framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test-framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -72,7 +72,7 @@ public class MockScriptEngine implements ScriptEngineService { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return script; } From 6692e42d9ae944e3ebdd3ef96c4ef40dbbc6b87f Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 17 Dec 2015 02:35:00 -0500 Subject: [PATCH 111/167] thirdPartyAudit round 2 This fixes the `lenient` parameter to be `missingClasses`. I will remove this boolean and we can handle them via the normal whitelist. It also adds a check for sheisty classes (jar hell with the jdk). This is inspired by the lucene "sheisty" classes check, but it has false positives. This check is more evil, it validates every class file against the extension classloader as a resource, to see if it exists there. If so: jar hell. This jar hell is a problem for several reasons: 1. causes insanely-hard-to-debug problems (like bugs in forbidden-apis) 2. hides problems (like internal api access) 3. the code you think is executing, is not really executing 4. security permissions are not what you think they are 5. brings in unnecessary dependencies 6. it's jar hell The more difficult problems are stuff like jython, where these classes are simply 'uberjarred' directly in, so you can't just fix them by removing a bogus dependency. And there is a legit reason for them to do that, they want to support java 1.4.
--- .../precommit/ThirdPartyAuditTask.groovy | 71 +++- core/build.gradle | 2 +- modules/lang-expression/build.gradle | 2 +- modules/lang-groovy/build.gradle | 2 +- plugins/discovery-azure/build.gradle | 11 +- .../licenses/activation-1.1.jar.sha1 | 1 - .../licenses/activation-LICENSE.txt | 119 ------ .../licenses/activation-NOTICE.txt | 1 - .../licenses/jaxb-api-2.2.2.jar.sha1 | 1 - .../discovery-azure/licenses/stax-LICENSE.txt | 202 ----------- .../discovery-azure/licenses/stax-NOTICE.txt | 1 - .../licenses/stax-api-1.0-2.jar.sha1 | 1 - plugins/discovery-ec2/build.gradle | 2 +- plugins/discovery-gce/build.gradle | 2 +- plugins/lang-plan-a/build.gradle | 2 +- plugins/lang-python/build.gradle | 342 +++++++++++++++++- plugins/mapper-attachments/build.gradle | 3 +- .../licenses/stax-api-1.0.1.jar.sha1 | 1 - .../licenses/stax-api-LICENSE.txt | 202 ----------- .../licenses/stax-api-NOTICE.txt | 0 plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-s3/build.gradle | 2 +- qa/evil-tests/build.gradle | 2 +- test-framework/build.gradle | 2 +- 24 files changed, 416 insertions(+), 560 deletions(-) delete mode 100644 plugins/discovery-azure/licenses/activation-1.1.jar.sha1 delete mode 100644 plugins/discovery-azure/licenses/activation-LICENSE.txt delete mode 100644 plugins/discovery-azure/licenses/activation-NOTICE.txt delete mode 100644 plugins/discovery-azure/licenses/jaxb-api-2.2.2.jar.sha1 delete mode 100644 plugins/discovery-azure/licenses/stax-LICENSE.txt delete mode 100644 plugins/discovery-azure/licenses/stax-NOTICE.txt delete mode 100644 plugins/discovery-azure/licenses/stax-api-1.0-2.jar.sha1 delete mode 100644 plugins/mapper-attachments/licenses/stax-api-1.0.1.jar.sha1 delete mode 100644 plugins/mapper-attachments/licenses/stax-api-LICENSE.txt delete mode 100644 plugins/mapper-attachments/licenses/stax-api-NOTICE.txt diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy 
b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index 86ac767d06c..83fe9115083 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -18,6 +18,12 @@ */ package org.elasticsearch.gradle.precommit +import java.nio.file.Files +import java.nio.file.FileVisitResult +import java.nio.file.Path +import java.nio.file.SimpleFileVisitor +import java.nio.file.attribute.BasicFileAttributes + import org.gradle.api.DefaultTask import org.gradle.api.artifacts.UnknownConfigurationException import org.gradle.api.file.FileCollection @@ -32,7 +38,7 @@ import org.apache.tools.ant.Project public class ThirdPartyAuditTask extends DefaultTask { // true to be lenient about MISSING CLASSES - private boolean lenient; + private boolean missingClasses; // patterns for classes to exclude, because we understand their issues private String[] excludes = new String[0]; @@ -43,19 +49,19 @@ public class ThirdPartyAuditTask extends DefaultTask { } /** - * Set to true to be lenient with dependencies. By default this check will fail if it finds + * Set to true to be lenient with missing classes. By default this check will fail if it finds * MISSING CLASSES. This means the set of jars is incomplete. However, in some cases * this can be due to intentional exclusions that are well-tested and understood. */ - public void setLenient(boolean value) { - lenient = value; + public void setMissingClasses(boolean value) { + missingClasses = value; } /** * Returns true if leniency about missing classes is enabled. 
*/ - public boolean isLenient() { - return lenient; + public boolean isMissingClasses() { + return missingClasses; } /** @@ -118,9 +124,10 @@ public class ThirdPartyAuditTask extends DefaultTask { } logger.error("[thirdPartyAudit] Scanning: " + names) - // warn that you won't see any forbidden apis warnings - if (lenient) { - logger.warn("[thirdPartyAudit] WARNING: leniency is enabled, will not fail if classes are missing!") + // warn that classes are missing + // TODO: move these to excludes list! + if (missingClasses) { + logger.warn("[thirdPartyAudit] WARNING: CLASSES ARE MISSING! Expect NoClassDefFoundError in bug reports from users!") } // TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first, @@ -135,7 +142,7 @@ public class ThirdPartyAuditTask extends DefaultTask { for (File jar : jars) { ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath()) } - + // convert exclusion class names to binary file names String[] excludedFiles = new String[excludes.length]; for (int i = 0; i < excludes.length; i++) { @@ -146,13 +153,55 @@ public class ThirdPartyAuditTask extends DefaultTask { } } + // jarHellReprise + checkSheistyClasses(tmpDir.toPath(), new HashSet<>(Arrays.asList(excludedFiles))); + ant.thirdPartyAudit(internalRuntimeForbidden: true, failOnUnsupportedJava: false, - failOnMissingClasses: !lenient, + failOnMissingClasses: !missingClasses, classpath: project.configurations.testCompile.asPath) { fileset(dir: tmpDir, excludes: excludedFiles.join(',')) } // clean up our mess (if we succeed) ant.delete(dir: tmpDir.getAbsolutePath()) } + + /** + * check for sheisty classes: if they also exist in the extensions classloader, its jar hell with the jdk! + */ + private void checkSheistyClasses(Path root, Set excluded) { + // system.parent = extensions loader. + // note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!). + // but groovy/gradle needs to work at all first! 
+ ClassLoader ext = ClassLoader.getSystemClassLoader().getParent() + assert ext != null + + Set sheistySet = new TreeSet<>(); + Files.walkFileTree(root, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + String entry = root.relativize(file).toString() + if (entry.endsWith(".class")) { + if (ext.getResource(entry) != null) { + sheistySet.add(entry); + } + } + return FileVisitResult.CONTINUE; + } + }); + + // check if we are ok + if (sheistySet.isEmpty()) { + return; + } + + // leniency against exclusions list + sheistySet.removeAll(excluded); + + if (sheistySet.isEmpty()) { + logger.warn("[thirdPartyAudit] WARNING: JAR HELL WITH JDK! Expect insanely hard-to-debug problems!") + } else { + throw new IllegalStateException("JAR HELL WITH JDK! " + sheistySet); + } + } } diff --git a/core/build.gradle b/core/build.gradle index 302757ccc8b..f4eb2c0aebd 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -112,7 +112,7 @@ forbiddenPatterns { } // classes are missing, e.g. org.jboss.marshalling.Marshaller -thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true // uses internal sun ssl classes! thirdPartyAudit.excludes = [ // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name) diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index aac94570a36..5563fdafe36 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -35,7 +35,7 @@ dependencyLicenses { // do we or do we not depend on asm-tree, that is the question // classes are missing, e.g. 
org.objectweb.asm.tree.LabelNode -thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true compileJava.options.compilerArgs << '-Xlint:-rawtypes' compileTestJava.options.compilerArgs << '-Xlint:-rawtypes' diff --git a/modules/lang-groovy/build.gradle b/modules/lang-groovy/build.gradle index 76686a760a2..7ffb5626d4a 100644 --- a/modules/lang-groovy/build.gradle +++ b/modules/lang-groovy/build.gradle @@ -37,7 +37,7 @@ integTest { } // classes are missing, e.g. jline.console.completer.Completer -thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true thirdPartyAudit.excludes = [ // uses internal java api: sun.misc.Unsafe 'groovy.json.internal.FastStringUtils', diff --git a/plugins/discovery-azure/build.gradle b/plugins/discovery-azure/build.gradle index 6f21364ea6d..ce80a441760 100644 --- a/plugins/discovery-azure/build.gradle +++ b/plugins/discovery-azure/build.gradle @@ -37,15 +37,12 @@ dependencies { compile "commons-lang:commons-lang:2.6" compile "commons-io:commons-io:2.4" compile 'javax.mail:mail:1.4.5' - compile 'javax.activation:activation:1.1' compile 'javax.inject:javax.inject:1' compile "com.sun.jersey:jersey-client:${versions.jersey}" compile "com.sun.jersey:jersey-core:${versions.jersey}" compile "com.sun.jersey:jersey-json:${versions.jersey}" compile 'org.codehaus.jettison:jettison:1.1' compile 'com.sun.xml.bind:jaxb-impl:2.2.3-1' - compile 'javax.xml.bind:jaxb-api:2.2.2' - compile 'javax.xml.stream:stax-api:1.0-2' compile 'org.codehaus.jackson:jackson-core-asl:1.9.2' compile 'org.codehaus.jackson:jackson-mapper-asl:1.9.2' compile 'org.codehaus.jackson:jackson-jaxrs:1.9.2' @@ -57,7 +54,6 @@ dependencyLicenses { mapping from: /jackson-.*/, to: 'jackson' mapping from: /jersey-.*/, to: 'jersey' mapping from: /jaxb-.*/, to: 'jaxb' - mapping from: /stax-.*/, to: 'stax' } compileJava.options.compilerArgs << '-Xlint:-path,-serial,-static,-unchecked' @@ -67,9 +63,10 @@ compileJava.options.compilerArgs << '-Xlint:-deprecation' 
compileTestJava.options.compilerArgs << '-Xlint:-static' // classes are missing, e.g. org.osgi.framework.BundleActivator -thirdPartyAudit.lenient = true -// WE ARE JAR HELLING WITH THE JDK AND THAT IS WHY THIS HAPPENS -// TODO: fix this!!!!!!!!!!! +thirdPartyAudit.missingClasses = true +// TODO: figure out what is happening and fix this!!!!!!!!!!! +// there might be still some undetected jar hell! +// we need to fix https://github.com/policeman-tools/forbidden-apis/issues/91 first thirdPartyAudit.excludes = [ // uses internal java api: com.sun.xml.fastinfoset.stax.StAXDocumentParser 'com.sun.xml.bind.v2.runtime.unmarshaller.FastInfosetConnector', diff --git a/plugins/discovery-azure/licenses/activation-1.1.jar.sha1 b/plugins/discovery-azure/licenses/activation-1.1.jar.sha1 deleted file mode 100644 index c4ee8fa5eb8..00000000000 --- a/plugins/discovery-azure/licenses/activation-1.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e6cb541461c2834bdea3eb920f1884d1eb508b50 diff --git a/plugins/discovery-azure/licenses/activation-LICENSE.txt b/plugins/discovery-azure/licenses/activation-LICENSE.txt deleted file mode 100644 index 1154e0aeec5..00000000000 --- a/plugins/discovery-azure/licenses/activation-LICENSE.txt +++ /dev/null @@ -1,119 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - -1. Definitions. - -1.1. Contributor means each individual or entity that creates or contributes to the creation of Modifications. - -1.2. Contributor Version means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. - -1.3. Covered Software means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. - -1.4. Executable means the Covered Software in any form other than Source Code. - -1.5. 
Initial Developer means the individual or entity that first makes Original Software available under this License. - -1.6. Larger Work means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. - -1.7. License means this document. - -1.8. Licensable means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. - -1.9. Modifications means the Source Code and Executable form of any of the following: - -A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; - -B. Any new file that contains any part of the Original Software or previous Modification; or - -C. Any new file that is contributed or otherwise made available under the terms of this License. - -1.10. Original Software means the Source Code and Executable form of computer software code that is originally released under this License. - -1.11. Patent Claims means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. - -1.12. Source Code means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. - -1.13. You (or Your) means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, You includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, control means (a)áthe power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b)áownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -2. License Grants. - -2.1. 
The Initial Developer Grant. -Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: -(a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and -(b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). -(c) The licenses granted in Sectionsá2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. -(d) Notwithstanding Sectioná2.1(b) above, no patent license is granted: (1)áfor code that You delete from the Original Software, or (2)áfor infringements caused by: (i)áthe modification of the Original Software, or (ii)áthe combination of the Original Software with other software or devices. - -2.2. Contributor Grant. 
-Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: -(a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and -(b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1)áModifications made by that Contributor (or portions thereof); and (2)áthe combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). -(c) The licenses granted in Sectionsá2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. -(d) Notwithstanding Sectioná2.2(b) above, no patent license is granted: (1)áfor any code that Contributor has deleted from the Contributor Version; (2)áfor infringements caused by: (i)áthird party modifications of Contributor Version, or (ii)áthe combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3)áunder Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. - -3. Distribution Obligations. - -3.1. Availability of Source Code. 
- -Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. - -3.2. Modifications. - -The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. - -3.3. Required Notices. -You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. - -3.4. Application of Additional Terms. -You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. 
You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. - -3.5. Distribution of Executable Versions. -You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipients rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. - -3.6. Larger Works. -You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. - -4. Versions of the License. - -4.1. New Versions. -Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. - -4.2. Effect of New Versions. 
- -You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. -4.3. Modified Versions. - -When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a)árename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b)áotherwise make it clear that the license contains terms which differ from this License. - -5. DISCLAIMER OF WARRANTY. - -COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN AS IS BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - -6.1. 
This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. - -6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as Participant) alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sectionsá2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. - -6.3. In the event of termination under Sectionsá6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. 
- -UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTYS NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - -The Covered Software is a commercial item, as that term is defined in 48áC.F.R.á2.101 (Oct. 1995), consisting of commercial computer software (as that term is defined at 48 C.F.R. á252.227-7014(a)(1)) and commercial computer software documentation as such terms are used in 48áC.F.R.á12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. - -9. MISCELLANEOUS. - -This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. 
This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdictions conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - -As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) -The GlassFish code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. 
- - - diff --git a/plugins/discovery-azure/licenses/activation-NOTICE.txt b/plugins/discovery-azure/licenses/activation-NOTICE.txt deleted file mode 100644 index 8d1c8b69c3f..00000000000 --- a/plugins/discovery-azure/licenses/activation-NOTICE.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/plugins/discovery-azure/licenses/jaxb-api-2.2.2.jar.sha1 b/plugins/discovery-azure/licenses/jaxb-api-2.2.2.jar.sha1 deleted file mode 100644 index a145d47cec9..00000000000 --- a/plugins/discovery-azure/licenses/jaxb-api-2.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aeb3021ca93dde265796d82015beecdcff95bf09 diff --git a/plugins/discovery-azure/licenses/stax-LICENSE.txt b/plugins/discovery-azure/licenses/stax-LICENSE.txt deleted file mode 100644 index d6456956733..00000000000 --- a/plugins/discovery-azure/licenses/stax-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/plugins/discovery-azure/licenses/stax-NOTICE.txt b/plugins/discovery-azure/licenses/stax-NOTICE.txt deleted file mode 100644 index 8d1c8b69c3f..00000000000 --- a/plugins/discovery-azure/licenses/stax-NOTICE.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/plugins/discovery-azure/licenses/stax-api-1.0-2.jar.sha1 b/plugins/discovery-azure/licenses/stax-api-1.0-2.jar.sha1 deleted file mode 100644 index fb00ad889b6..00000000000 --- a/plugins/discovery-azure/licenses/stax-api-1.0-2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6337b0de8b25e53e81b922352fbea9f9f57ba0b diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 14767c6783b..355dbc55164 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -50,7 +50,7 @@ test { } // classes are missing, e.g. 
org.apache.avalon.framework.logger.Logger -thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true thirdPartyAudit.excludes = [ // uses internal java api: com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl // uses internal java api: com.sun.org.apache.xml.internal.dtm.ref.DTMManagerDefault diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 2ec479a3e80..b054e0f37b8 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -33,4 +33,4 @@ test { } // classes are missing, e.g. org.apache.log.Logger -thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true diff --git a/plugins/lang-plan-a/build.gradle b/plugins/lang-plan-a/build.gradle index c23c3a30efc..5f0ddafcc97 100644 --- a/plugins/lang-plan-a/build.gradle +++ b/plugins/lang-plan-a/build.gradle @@ -34,7 +34,7 @@ compileJava.options.compilerArgs << '-Xlint:-cast,-fallthrough,-rawtypes' compileTestJava.options.compilerArgs << '-Xlint:-unchecked' // classes are missing, e.g. org.objectweb.asm.tree.LabelNode -thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true // regeneration logic, comes in via ant right now // don't port it to gradle, it works fine. diff --git a/plugins/lang-python/build.gradle b/plugins/lang-python/build.gradle index a7faedf6a78..1c33ad2d5ee 100644 --- a/plugins/lang-python/build.gradle +++ b/plugins/lang-python/build.gradle @@ -37,7 +37,7 @@ integTest { } // classes are missing, e.g. 
org.tukaani.xz.FilterOptions -thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true thirdPartyAudit.excludes = [ // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name) 'org.python.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', @@ -72,4 +72,344 @@ thirdPartyAudit.excludes = [ 'org.python.netty.util.internal.chmv8.ForkJoinPool$WorkQueue', 'org.python.netty.util.internal.chmv8.ForkJoinTask', 'org.python.netty.util.internal.chmv8.ForkJoinTask$1', + + // "uberjaring" (but not shading) classes that have been in the JDK since 1.5 + // nice job python. + 'javax.xml.XMLConstants', + 'javax.xml.datatype.DatatypeConfigurationException', + 'javax.xml.datatype.DatatypeConstants$1', + 'javax.xml.datatype.DatatypeConstants$Field', + 'javax.xml.datatype.DatatypeConstants', + 'javax.xml.datatype.DatatypeFactory', + 'javax.xml.datatype.Duration', + 'javax.xml.datatype.FactoryFinder', + 'javax.xml.datatype.SecuritySupport$1', + 'javax.xml.datatype.SecuritySupport$2', + 'javax.xml.datatype.SecuritySupport$3', + 'javax.xml.datatype.SecuritySupport$4', + 'javax.xml.datatype.SecuritySupport$5', + 'javax.xml.datatype.SecuritySupport', + 'javax.xml.datatype.XMLGregorianCalendar', + 'javax.xml.namespace.NamespaceContext', + 'javax.xml.namespace.QName$1', + 'javax.xml.namespace.QName', + 'javax.xml.parsers.DocumentBuilder', + 'javax.xml.parsers.DocumentBuilderFactory', + 'javax.xml.parsers.FactoryConfigurationError', + 'javax.xml.parsers.FactoryFinder', + 'javax.xml.parsers.ParserConfigurationException', + 'javax.xml.parsers.SAXParser', + 'javax.xml.parsers.SAXParserFactory', + 'javax.xml.parsers.SecuritySupport$1', + 'javax.xml.parsers.SecuritySupport$2', + 'javax.xml.parsers.SecuritySupport$3', + 'javax.xml.parsers.SecuritySupport$4', + 'javax.xml.parsers.SecuritySupport$5', + 'javax.xml.parsers.SecuritySupport', + 'javax.xml.stream.EventFilter', + 'javax.xml.stream.FactoryConfigurationError', + 'javax.xml.stream.FactoryFinder', + 
'javax.xml.stream.Location', + 'javax.xml.stream.SecuritySupport$1', + 'javax.xml.stream.SecuritySupport$2', + 'javax.xml.stream.SecuritySupport$3', + 'javax.xml.stream.SecuritySupport$4', + 'javax.xml.stream.SecuritySupport$5', + 'javax.xml.stream.SecuritySupport', + 'javax.xml.stream.StreamFilter', + 'javax.xml.stream.XMLEventFactory', + 'javax.xml.stream.XMLEventReader', + 'javax.xml.stream.XMLEventWriter', + 'javax.xml.stream.XMLInputFactory', + 'javax.xml.stream.XMLOutputFactory', + 'javax.xml.stream.XMLReporter', + 'javax.xml.stream.XMLResolver', + 'javax.xml.stream.XMLStreamConstants', + 'javax.xml.stream.XMLStreamException', + 'javax.xml.stream.XMLStreamReader', + 'javax.xml.stream.XMLStreamWriter', + 'javax.xml.stream.events.Attribute', + 'javax.xml.stream.events.Characters', + 'javax.xml.stream.events.Comment', + 'javax.xml.stream.events.DTD', + 'javax.xml.stream.events.EndDocument', + 'javax.xml.stream.events.EndElement', + 'javax.xml.stream.events.EntityDeclaration', + 'javax.xml.stream.events.EntityReference', + 'javax.xml.stream.events.Namespace', + 'javax.xml.stream.events.NotationDeclaration', + 'javax.xml.stream.events.ProcessingInstruction', + 'javax.xml.stream.events.StartDocument', + 'javax.xml.stream.events.StartElement', + 'javax.xml.stream.events.XMLEvent', + 'javax.xml.stream.util.EventReaderDelegate', + 'javax.xml.stream.util.StreamReaderDelegate', + 'javax.xml.stream.util.XMLEventAllocator', + 'javax.xml.stream.util.XMLEventConsumer', + 'javax.xml.transform.ErrorListener', + 'javax.xml.transform.FactoryFinder', + 'javax.xml.transform.OutputKeys', + 'javax.xml.transform.Result', + 'javax.xml.transform.SecuritySupport$1', + 'javax.xml.transform.SecuritySupport$2', + 'javax.xml.transform.SecuritySupport$3', + 'javax.xml.transform.SecuritySupport$4', + 'javax.xml.transform.SecuritySupport$5', + 'javax.xml.transform.SecuritySupport', + 'javax.xml.transform.Source', + 'javax.xml.transform.SourceLocator', + 'javax.xml.transform.Templates', + 
'javax.xml.transform.Transformer', + 'javax.xml.transform.TransformerConfigurationException', + 'javax.xml.transform.TransformerException', + 'javax.xml.transform.TransformerFactory', + 'javax.xml.transform.TransformerFactoryConfigurationError', + 'javax.xml.transform.URIResolver', + 'javax.xml.transform.dom.DOMLocator', + 'javax.xml.transform.dom.DOMResult', + 'javax.xml.transform.dom.DOMSource', + 'javax.xml.transform.sax.SAXResult', + 'javax.xml.transform.sax.SAXSource', + 'javax.xml.transform.sax.SAXTransformerFactory', + 'javax.xml.transform.sax.TemplatesHandler', + 'javax.xml.transform.sax.TransformerHandler', + 'javax.xml.transform.stax.StAXResult', + 'javax.xml.transform.stax.StAXSource', + 'javax.xml.transform.stream.StreamResult', + 'javax.xml.transform.stream.StreamSource', + 'javax.xml.validation.Schema', + 'javax.xml.validation.SchemaFactory', + 'javax.xml.validation.SchemaFactoryFinder$1', + 'javax.xml.validation.SchemaFactoryFinder$2', + 'javax.xml.validation.SchemaFactoryFinder', + 'javax.xml.validation.SchemaFactoryLoader', + 'javax.xml.validation.SecuritySupport$1', + 'javax.xml.validation.SecuritySupport$2', + 'javax.xml.validation.SecuritySupport$3', + 'javax.xml.validation.SecuritySupport$4', + 'javax.xml.validation.SecuritySupport$5', + 'javax.xml.validation.SecuritySupport$6', + 'javax.xml.validation.SecuritySupport$7', + 'javax.xml.validation.SecuritySupport$8', + 'javax.xml.validation.SecuritySupport', + 'javax.xml.validation.TypeInfoProvider', + 'javax.xml.validation.Validator', + 'javax.xml.validation.ValidatorHandler', + 'javax.xml.xpath.SecuritySupport$1', + 'javax.xml.xpath.SecuritySupport$2', + 'javax.xml.xpath.SecuritySupport$3', + 'javax.xml.xpath.SecuritySupport$4', + 'javax.xml.xpath.SecuritySupport$5', + 'javax.xml.xpath.SecuritySupport$6', + 'javax.xml.xpath.SecuritySupport$7', + 'javax.xml.xpath.SecuritySupport$8', + 'javax.xml.xpath.SecuritySupport', + 'javax.xml.xpath.XPath', + 'javax.xml.xpath.XPathConstants', + 
'javax.xml.xpath.XPathException', + 'javax.xml.xpath.XPathExpression', + 'javax.xml.xpath.XPathExpressionException', + 'javax.xml.xpath.XPathFactory', + 'javax.xml.xpath.XPathFactoryConfigurationException', + 'javax.xml.xpath.XPathFactoryFinder$1', + 'javax.xml.xpath.XPathFactoryFinder$2', + 'javax.xml.xpath.XPathFactoryFinder', + 'javax.xml.xpath.XPathFunction', + 'javax.xml.xpath.XPathFunctionException', + 'javax.xml.xpath.XPathFunctionResolver', + 'javax.xml.xpath.XPathVariableResolver', + 'org.w3c.dom.Attr', + 'org.w3c.dom.CDATASection', + 'org.w3c.dom.CharacterData', + 'org.w3c.dom.Comment', + 'org.w3c.dom.DOMConfiguration', + 'org.w3c.dom.DOMError', + 'org.w3c.dom.DOMErrorHandler', + 'org.w3c.dom.DOMException', + 'org.w3c.dom.DOMImplementation', + 'org.w3c.dom.DOMImplementationList', + 'org.w3c.dom.DOMImplementationSource', + 'org.w3c.dom.DOMLocator', + 'org.w3c.dom.DOMStringList', + 'org.w3c.dom.Document', + 'org.w3c.dom.DocumentFragment', + 'org.w3c.dom.DocumentType', + 'org.w3c.dom.Element', + 'org.w3c.dom.Entity', + 'org.w3c.dom.EntityReference', + 'org.w3c.dom.NameList', + 'org.w3c.dom.NamedNodeMap', + 'org.w3c.dom.Node', + 'org.w3c.dom.NodeList', + 'org.w3c.dom.Notation', + 'org.w3c.dom.ProcessingInstruction', + 'org.w3c.dom.Text', + 'org.w3c.dom.TypeInfo', + 'org.w3c.dom.UserDataHandler', + 'org.w3c.dom.bootstrap.DOMImplementationRegistry$1', + 'org.w3c.dom.bootstrap.DOMImplementationRegistry$2', + 'org.w3c.dom.bootstrap.DOMImplementationRegistry$3', + 'org.w3c.dom.bootstrap.DOMImplementationRegistry$4', + 'org.w3c.dom.bootstrap.DOMImplementationRegistry', + 'org.w3c.dom.css.CSS2Properties', + 'org.w3c.dom.css.CSSCharsetRule', + 'org.w3c.dom.css.CSSFontFaceRule', + 'org.w3c.dom.css.CSSImportRule', + 'org.w3c.dom.css.CSSMediaRule', + 'org.w3c.dom.css.CSSPageRule', + 'org.w3c.dom.css.CSSPrimitiveValue', + 'org.w3c.dom.css.CSSRule', + 'org.w3c.dom.css.CSSRuleList', + 'org.w3c.dom.css.CSSStyleDeclaration', + 'org.w3c.dom.css.CSSStyleRule', + 
'org.w3c.dom.css.CSSStyleSheet', + 'org.w3c.dom.css.CSSUnknownRule', + 'org.w3c.dom.css.CSSValue', + 'org.w3c.dom.css.CSSValueList', + 'org.w3c.dom.css.Counter', + 'org.w3c.dom.css.DOMImplementationCSS', + 'org.w3c.dom.css.DocumentCSS', + 'org.w3c.dom.css.ElementCSSInlineStyle', + 'org.w3c.dom.css.RGBColor', + 'org.w3c.dom.css.Rect', + 'org.w3c.dom.css.ViewCSS', + 'org.w3c.dom.events.DocumentEvent', + 'org.w3c.dom.events.Event', + 'org.w3c.dom.events.EventException', + 'org.w3c.dom.events.EventListener', + 'org.w3c.dom.events.EventTarget', + 'org.w3c.dom.events.MouseEvent', + 'org.w3c.dom.events.MutationEvent', + 'org.w3c.dom.events.UIEvent', + 'org.w3c.dom.html.HTMLAnchorElement', + 'org.w3c.dom.html.HTMLAppletElement', + 'org.w3c.dom.html.HTMLAreaElement', + 'org.w3c.dom.html.HTMLBRElement', + 'org.w3c.dom.html.HTMLBaseElement', + 'org.w3c.dom.html.HTMLBaseFontElement', + 'org.w3c.dom.html.HTMLBodyElement', + 'org.w3c.dom.html.HTMLButtonElement', + 'org.w3c.dom.html.HTMLCollection', + 'org.w3c.dom.html.HTMLDListElement', + 'org.w3c.dom.html.HTMLDOMImplementation', + 'org.w3c.dom.html.HTMLDirectoryElement', + 'org.w3c.dom.html.HTMLDivElement', + 'org.w3c.dom.html.HTMLDocument', + 'org.w3c.dom.html.HTMLElement', + 'org.w3c.dom.html.HTMLFieldSetElement', + 'org.w3c.dom.html.HTMLFontElement', + 'org.w3c.dom.html.HTMLFormElement', + 'org.w3c.dom.html.HTMLFrameElement', + 'org.w3c.dom.html.HTMLFrameSetElement', + 'org.w3c.dom.html.HTMLHRElement', + 'org.w3c.dom.html.HTMLHeadElement', + 'org.w3c.dom.html.HTMLHeadingElement', + 'org.w3c.dom.html.HTMLHtmlElement', + 'org.w3c.dom.html.HTMLIFrameElement', + 'org.w3c.dom.html.HTMLImageElement', + 'org.w3c.dom.html.HTMLInputElement', + 'org.w3c.dom.html.HTMLIsIndexElement', + 'org.w3c.dom.html.HTMLLIElement', + 'org.w3c.dom.html.HTMLLabelElement', + 'org.w3c.dom.html.HTMLLegendElement', + 'org.w3c.dom.html.HTMLLinkElement', + 'org.w3c.dom.html.HTMLMapElement', + 'org.w3c.dom.html.HTMLMenuElement', + 
'org.w3c.dom.html.HTMLMetaElement', + 'org.w3c.dom.html.HTMLModElement', + 'org.w3c.dom.html.HTMLOListElement', + 'org.w3c.dom.html.HTMLObjectElement', + 'org.w3c.dom.html.HTMLOptGroupElement', + 'org.w3c.dom.html.HTMLOptionElement', + 'org.w3c.dom.html.HTMLParagraphElement', + 'org.w3c.dom.html.HTMLParamElement', + 'org.w3c.dom.html.HTMLPreElement', + 'org.w3c.dom.html.HTMLQuoteElement', + 'org.w3c.dom.html.HTMLScriptElement', + 'org.w3c.dom.html.HTMLSelectElement', + 'org.w3c.dom.html.HTMLStyleElement', + 'org.w3c.dom.html.HTMLTableCaptionElement', + 'org.w3c.dom.html.HTMLTableCellElement', + 'org.w3c.dom.html.HTMLTableColElement', + 'org.w3c.dom.html.HTMLTableElement', + 'org.w3c.dom.html.HTMLTableRowElement', + 'org.w3c.dom.html.HTMLTableSectionElement', + 'org.w3c.dom.html.HTMLTextAreaElement', + 'org.w3c.dom.html.HTMLTitleElement', + 'org.w3c.dom.html.HTMLUListElement', + 'org.w3c.dom.ls.DOMImplementationLS', + 'org.w3c.dom.ls.LSException', + 'org.w3c.dom.ls.LSInput', + 'org.w3c.dom.ls.LSLoadEvent', + 'org.w3c.dom.ls.LSOutput', + 'org.w3c.dom.ls.LSParser', + 'org.w3c.dom.ls.LSParserFilter', + 'org.w3c.dom.ls.LSProgressEvent', + 'org.w3c.dom.ls.LSResourceResolver', + 'org.w3c.dom.ls.LSSerializer', + 'org.w3c.dom.ls.LSSerializerFilter', + 'org.w3c.dom.ranges.DocumentRange', + 'org.w3c.dom.ranges.Range', + 'org.w3c.dom.ranges.RangeException', + 'org.w3c.dom.stylesheets.DocumentStyle', + 'org.w3c.dom.stylesheets.LinkStyle', + 'org.w3c.dom.stylesheets.MediaList', + 'org.w3c.dom.stylesheets.StyleSheet', + 'org.w3c.dom.stylesheets.StyleSheetList', + 'org.w3c.dom.traversal.DocumentTraversal', + 'org.w3c.dom.traversal.NodeFilter', + 'org.w3c.dom.traversal.NodeIterator', + 'org.w3c.dom.traversal.TreeWalker', + 'org.w3c.dom.views.AbstractView', + 'org.w3c.dom.views.DocumentView', + 'org.w3c.dom.xpath.XPathEvaluator', + 'org.w3c.dom.xpath.XPathException', + 'org.w3c.dom.xpath.XPathExpression', + 'org.w3c.dom.xpath.XPathNSResolver', + 'org.w3c.dom.xpath.XPathNamespace', + 
'org.w3c.dom.xpath.XPathResult', + 'org.xml.sax.AttributeList', + 'org.xml.sax.Attributes', + 'org.xml.sax.ContentHandler', + 'org.xml.sax.DTDHandler', + 'org.xml.sax.DocumentHandler', + 'org.xml.sax.EntityResolver', + 'org.xml.sax.ErrorHandler', + 'org.xml.sax.HandlerBase', + 'org.xml.sax.InputSource', + 'org.xml.sax.Locator', + 'org.xml.sax.Parser', + 'org.xml.sax.SAXException', + 'org.xml.sax.SAXNotRecognizedException', + 'org.xml.sax.SAXNotSupportedException', + 'org.xml.sax.SAXParseException', + 'org.xml.sax.XMLFilter', + 'org.xml.sax.XMLReader', + 'org.xml.sax.ext.Attributes2', + 'org.xml.sax.ext.Attributes2Impl', + 'org.xml.sax.ext.DeclHandler', + 'org.xml.sax.ext.DefaultHandler2', + 'org.xml.sax.ext.EntityResolver2', + 'org.xml.sax.ext.LexicalHandler', + 'org.xml.sax.ext.Locator2', + 'org.xml.sax.ext.Locator2Impl', + 'org.xml.sax.helpers.AttributeListImpl', + 'org.xml.sax.helpers.AttributesImpl', + 'org.xml.sax.helpers.DefaultHandler', + 'org.xml.sax.helpers.LocatorImpl', + 'org.xml.sax.helpers.NamespaceSupport$Context', + 'org.xml.sax.helpers.NamespaceSupport', + 'org.xml.sax.helpers.NewInstance', + 'org.xml.sax.helpers.ParserAdapter$AttributeListAdapter', + 'org.xml.sax.helpers.ParserAdapter', + 'org.xml.sax.helpers.ParserFactory', + 'org.xml.sax.helpers.SecuritySupport$1', + 'org.xml.sax.helpers.SecuritySupport$2', + 'org.xml.sax.helpers.SecuritySupport$3', + 'org.xml.sax.helpers.SecuritySupport$4', + 'org.xml.sax.helpers.SecuritySupport', + 'org.xml.sax.helpers.XMLFilterImpl', + 'org.xml.sax.helpers.XMLReaderAdapter$AttributesAdapter', + 'org.xml.sax.helpers.XMLReaderAdapter', + 'org.xml.sax.helpers.XMLReaderFactory', ] diff --git a/plugins/mapper-attachments/build.gradle b/plugins/mapper-attachments/build.gradle index f3c414a0718..58f2dceb740 100644 --- a/plugins/mapper-attachments/build.gradle +++ b/plugins/mapper-attachments/build.gradle @@ -55,7 +55,6 @@ dependencies { compile "org.apache.poi:poi-ooxml-schemas:${versions.poi}" compile 
"commons-codec:commons-codec:${versions.commonscodec}" compile 'org.apache.xmlbeans:xmlbeans:2.6.0' - compile 'stax:stax-api:1.0.1' // MS Office compile "org.apache.poi:poi-scratchpad:${versions.poi}" // Apple iWork @@ -71,7 +70,7 @@ forbiddenPatterns { } // classes are missing, e.g. org.openxmlformats.schemas.drawingml.x2006.chart.CTExtensionList -thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true thirdPartyAudit.excludes = [ // uses internal java api: com.sun.syndication (SyndFeedInput, SyndFeed, SyndEntry, SyndContent) 'org.apache.tika.parser.feed.FeedParser', diff --git a/plugins/mapper-attachments/licenses/stax-api-1.0.1.jar.sha1 b/plugins/mapper-attachments/licenses/stax-api-1.0.1.jar.sha1 deleted file mode 100644 index 4426e34685d..00000000000 --- a/plugins/mapper-attachments/licenses/stax-api-1.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49c100caf72d658aca8e58bd74a4ba90fa2b0d70 \ No newline at end of file diff --git a/plugins/mapper-attachments/licenses/stax-api-LICENSE.txt b/plugins/mapper-attachments/licenses/stax-api-LICENSE.txt deleted file mode 100644 index d6456956733..00000000000 --- a/plugins/mapper-attachments/licenses/stax-api-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/plugins/mapper-attachments/licenses/stax-api-NOTICE.txt b/plugins/mapper-attachments/licenses/stax-api-NOTICE.txt deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index c09a3ff4d67..ca444768590 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -203,4 +203,4 @@ integTest { } // classes are missing, e.g. org.mockito.Mockito -thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 82797788f8e..90e4dd2d956 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -51,7 +51,7 @@ test { } // classes are missing, e.g. 
org.apache.log.Logger -thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true thirdPartyAudit.excludes = [ // uses internal java api: com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl // uses internal java api: com.sun.org.apache.xml.internal.dtm.ref.DTMManagerDefault diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index 02ed75fccbb..3782f368af4 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -36,7 +36,7 @@ test { } // classes are missing, com.ibm.icu.lang.UCharacter -thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true thirdPartyAudit.excludes = [ // uses internal java api: sun.misc.Unsafe 'com.google.common.cache.Striped64', diff --git a/test-framework/build.gradle b/test-framework/build.gradle index 2263413bbde..6930abb3d23 100644 --- a/test-framework/build.gradle +++ b/test-framework/build.gradle @@ -48,4 +48,4 @@ forbiddenApisMain { dependencyLicenses.enabled = false // we intentionally exclude the ant tasks because people were depending on them from their tests!!!!!!! 
-thirdPartyAudit.lenient = true +thirdPartyAudit.missingClasses = true From c1d8d3b28b288c6c2f5b9ada2b3e356e18734a53 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Thu, 17 Dec 2015 05:26:54 -0500 Subject: [PATCH 112/167] AllTermQuery's scorer should skip segments that never saw the requested term --- .../common/lucene/all/AllTermQuery.java | 4 +++ .../common/lucene/all/SimpleAllTests.java | 35 +++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java index 7191c96e33e..4fe90aed9e4 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java @@ -149,6 +149,10 @@ public final class AllTermQuery extends Query { return null; } final TermState state = termStates.get(context.ord); + if (state == null) { + // Term does not exist in this segment + return null; + } termsEnum.seekExact(term.bytes(), state); PostingsEnum docs = termsEnum.postings(null, PostingsEnum.PAYLOADS); assert docs != null; diff --git a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java index f4f3034528f..7ee238ae7f2 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java @@ -220,6 +220,41 @@ public class SimpleAllTests extends ESTestCase { indexWriter.close(); } + public void testTermMissingFromOneSegment() throws Exception { + Directory dir = new RAMDirectory(); + IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + + Document doc = new Document(); + doc.add(new Field("_id", "1", StoredField.TYPE)); + AllEntries allEntries = new AllEntries(); + allEntries.addText("field", 
"something", 2.0f); + allEntries.reset(); + doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER))); + + indexWriter.addDocument(doc); + indexWriter.commit(); + + doc = new Document(); + doc.add(new Field("_id", "2", StoredField.TYPE)); + allEntries = new AllEntries(); + allEntries.addText("field", "else", 1.0f); + allEntries.reset(); + doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER))); + + indexWriter.addDocument(doc); + + IndexReader reader = DirectoryReader.open(indexWriter, true); + assertEquals(2, reader.leaves().size()); + IndexSearcher searcher = new IndexSearcher(reader); + + // "something" only appears in the first segment: + Query query = new AllTermQuery(new Term("_all", "something")); + TopDocs docs = searcher.search(query, 10); + assertEquals(1, docs.totalHits); + + indexWriter.close(); + } + public void testMultipleTokensAllNoBoost() throws Exception { Directory dir = new RAMDirectory(); IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); From 86d6e28b7f40aec6d50e7af7208fa156df51d90a Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 17 Dec 2015 13:03:08 +0100 Subject: [PATCH 113/167] MetaDataMappingService should call MapperService.merge with the original mapping update. Currently MetaDataMappingService parses the mapping updates, reserializes it and finally calls MapperService.merge with the serialized mapping. Given that mapping serialization only writes differences from the default, this is a bit unfair to parsers since they can't know whether some option has been explicitly set or not. Furthermore this can cause bugs with metadata fields given that these fields use existing field types as defaults. This commit changes MetaDataMappingService to call MapperService.merge with the original mapping update. 
--- .../metadata/MetaDataMappingService.java | 37 +++--- .../index/mapper/MapperService.java | 109 +++++++++--------- 2 files changed, 72 insertions(+), 74 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index bbaeb5a11d7..8093d93ccce 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -236,8 +236,8 @@ public class MetaDataMappingService extends AbstractComponent { } private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException { - Map newMappers = new HashMap<>(); - Map existingMappers = new HashMap<>(); + String mappingType = request.type(); + CompressedXContent mappingUpdateSource = new CompressedXContent(request.source()); for (String index : request.indices()) { IndexService indexService = indicesService.indexServiceSafe(index); // try and parse it (no need to add it here) so we can bail early in case of parsing exception @@ -245,9 +245,9 @@ public class MetaDataMappingService extends AbstractComponent { DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type()); if (MapperService.DEFAULT_MAPPING.equals(request.type())) { // _default_ types do not go through merging, but we do test the new settings. 
Also don't apply the old default - newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false); + newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, false); } else { - newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null); + newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null); if (existingMapper != null) { // first, simulate // this will just throw exceptions in case of problems @@ -270,36 +270,31 @@ public class MetaDataMappingService extends AbstractComponent { } } } - newMappers.put(index, newMapper); - if (existingMapper != null) { - existingMappers.put(index, existingMapper); + if (mappingType == null) { + mappingType = newMapper.type(); + } else if (mappingType.equals(newMapper.type()) == false) { + throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition"); } } + assert mappingType != null; - String mappingType = request.type(); - if (mappingType == null) { - mappingType = newMappers.values().iterator().next().type(); - } else if (!mappingType.equals(newMappers.values().iterator().next().type())) { - throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition"); - } if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') { throw new InvalidTypeNameException("Document mapping type name can't start with '_'"); } final Map mappings = new HashMap<>(); - for (Map.Entry entry : newMappers.entrySet()) { - String index = entry.getKey(); + for (String index : request.indices()) { // do the actual merge here on the master, and update the mapping source - DocumentMapper newMapper = entry.getValue(); IndexService indexService = indicesService.indexService(index); if (indexService == 
null) { continue; } CompressedXContent existingSource = null; - if (existingMappers.containsKey(entry.getKey())) { - existingSource = existingMappers.get(entry.getKey()).mappingSource(); + DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType); + if (existingMapper != null) { + existingSource = existingMapper.mappingSource(); } - DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes()); + DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, true, request.updateAllTypes()); CompressedXContent updatedSource = mergedMapper.mappingSource(); if (existingSource != null) { @@ -318,9 +313,9 @@ public class MetaDataMappingService extends AbstractComponent { } else { mappings.put(index, new MappingMetaData(mergedMapper)); if (logger.isDebugEnabled()) { - logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource); + logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource); } else if (logger.isInfoEnabled()) { - logger.info("[{}] create_mapping [{}]", index, newMapper.type()); + logger.info("[{}] create_mapping [{}]", index, mappingType); } } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index de35b4712ea..37e99e8c90c 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -198,6 +198,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault, boolean updateAllTypes) { if (DEFAULT_MAPPING.equals(type)) { // verify we can parse it + // NOTE: never apply the default here DocumentMapper mapper = 
documentParser.parseCompressed(type, mappingSource); // still add it as a document mapper so we have it registered and, for example, persisted back into // the cluster meta data if needed, or checked for existence @@ -211,68 +212,70 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } return mapper; } else { - return merge(parse(type, mappingSource, applyDefault), updateAllTypes); + try (ReleasableLock lock = mappingWriteLock.acquire()) { + // only apply the default mapping if we don't have the type yet + applyDefault &= mappers.containsKey(type) == false; + return merge(parse(type, mappingSource, applyDefault), updateAllTypes); + } } } // never expose this to the outside world, we need to reparse the doc mapper so we get fresh // instances of field mappers to properly remove existing doc mapper private DocumentMapper merge(DocumentMapper mapper, boolean updateAllTypes) { - try (ReleasableLock lock = mappingWriteLock.acquire()) { - if (mapper.type().length() == 0) { - throw new InvalidTypeNameException("mapping type name is empty"); - } - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]"); - } - if (mapper.type().charAt(0) == '_') { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'"); - } - if (mapper.type().contains("#")) { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it"); - } - if (mapper.type().contains(",")) { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it"); - } - if (mapper.type().equals(mapper.parentFieldMapper().type())) { - throw new IllegalArgumentException("The [_parent.type] option can't point to the same type"); - } - if 
(typeNameStartsWithIllegalDot(mapper)) { - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { - throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'"); - } else { - logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type()); - } - } - // we can add new field/object mappers while the old ones are there - // since we get new instances of those, and when we remove, we remove - // by instance equality - DocumentMapper oldMapper = mappers.get(mapper.type()); - - if (oldMapper != null) { - oldMapper.merge(mapper.mapping(), false, updateAllTypes); - return oldMapper; + if (mapper.type().length() == 0) { + throw new InvalidTypeNameException("mapping type name is empty"); + } + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]"); + } + if (mapper.type().charAt(0) == '_') { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'"); + } + if (mapper.type().contains("#")) { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it"); + } + if (mapper.type().contains(",")) { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it"); + } + if (mapper.type().equals(mapper.parentFieldMapper().type())) { + throw new IllegalArgumentException("The [_parent.type] option can't point to the same type"); + } + if (typeNameStartsWithIllegalDot(mapper)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { + throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'"); } else { - Tuple, Collection> newMappers = 
checkMappersCompatibility( - mapper.type(), mapper.mapping(), updateAllTypes); - Collection newObjectMappers = newMappers.v1(); - Collection newFieldMappers = newMappers.v2(); - addMappers(mapper.type(), newObjectMappers, newFieldMappers); - - for (DocumentTypeListener typeListener : typeListeners) { - typeListener.beforeCreate(mapper); - } - mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map(); - if (mapper.parentFieldMapper().active()) { - Set newParentTypes = new HashSet<>(parentTypes.size() + 1); - newParentTypes.addAll(parentTypes); - newParentTypes.add(mapper.parentFieldMapper().type()); - parentTypes = unmodifiableSet(newParentTypes); - } - assert assertSerialization(mapper); - return mapper; + logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type()); } } + // we can add new field/object mappers while the old ones are there + // since we get new instances of those, and when we remove, we remove + // by instance equality + DocumentMapper oldMapper = mappers.get(mapper.type()); + + if (oldMapper != null) { + oldMapper.merge(mapper.mapping(), false, updateAllTypes); + return oldMapper; + } else { + Tuple, Collection> newMappers = checkMappersCompatibility( + mapper.type(), mapper.mapping(), updateAllTypes); + Collection newObjectMappers = newMappers.v1(); + Collection newFieldMappers = newMappers.v2(); + addMappers(mapper.type(), newObjectMappers, newFieldMappers); + + for (DocumentTypeListener typeListener : typeListeners) { + typeListener.beforeCreate(mapper); + } + mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map(); + if (mapper.parentFieldMapper().active()) { + Set newParentTypes = new HashSet<>(parentTypes.size() + 1); + newParentTypes.addAll(parentTypes); + newParentTypes.add(mapper.parentFieldMapper().type()); + parentTypes = unmodifiableSet(newParentTypes); + } + assert assertSerialization(mapper); + return mapper; + } } private boolean 
typeNameStartsWithIllegalDot(DocumentMapper mapper) { From b2b7b30d10f03d6df342c7fdb36e32047907ccc4 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Wed, 18 Nov 2015 13:00:14 +0100 Subject: [PATCH 114/167] BulkProcessor retries after request handling has been rejected due to a full thread pool With this commit we introduce limited retries with a backoff logic to BulkProcessor when a bulk request has been rejeced with an EsRejectedExecutionException. Fixes #14620. --- .../action/bulk/BackoffPolicy.java | 203 +++++++++++++++ .../action/bulk/BulkProcessor.java | 101 ++------ .../action/bulk/BulkRequestHandler.java | 160 ++++++++++++ .../org/elasticsearch/action/bulk/Retry.java | 237 ++++++++++++++++++ .../action/bulk/BulkProcessorRetryIT.java | 164 ++++++++++++ .../elasticsearch/action/bulk/RetryTests.java | 201 +++++++++++++++ 6 files changed, 988 insertions(+), 78 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java create mode 100644 core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java create mode 100644 core/src/main/java/org/elasticsearch/action/bulk/Retry.java create mode 100644 core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java create mode 100644 core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java new file mode 100644 index 00000000000..a0ccca0fb5c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java @@ -0,0 +1,203 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.common.unit.TimeValue; + +import java.util.Iterator; +import java.util.NoSuchElementException; + +/** + * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal + * thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally. + * + * Notes for implementing custom subclasses: + * + * The underlying mathematical principle of BackoffPolicy are progressions which can be either finite or infinite although + * the latter should not be used for retrying. A progression can be mapped to a java.util.Iterator with the following + * semantics: + * + *
    + *
  • #hasNext() determines whether the progression has more elements. Returns true for infinite progressions
  • + *
  • #next() determines the next element in the progression, i.e. the next wait time period
  • + *
+ * + * Note that backoff policies are exposed as Iterables in order to be consumed multiple times. + */ +public abstract class BackoffPolicy implements Iterable { + private static final BackoffPolicy NO_BACKOFF = new NoBackoff(); + + /** + * Creates a backoff policy that will not allow any backoff, i.e. an operation will fail after the first attempt. + * + * @return A backoff policy without any backoff period. The returned instance is thread safe. + */ + public static BackoffPolicy noBackoff() { + return NO_BACKOFF; + } + + /** + * Creates an new constant backoff policy with the provided configuration. + * + * @param delay The delay defines how long to wait between retry attempts. Must not be null. + * Must be <= Integer.MAX_VALUE ms. + * @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number. + * @return A backoff policy with a constant wait time between retries. The returned instance is thread safe but each + * iterator created from it should only be used by a single thread. + */ + public static BackoffPolicy constantBackoff(TimeValue delay, int maxNumberOfRetries) { + return new ConstantBackoff(checkDelay(delay), maxNumberOfRetries); + } + + /** + * Creates an new exponential backoff policy with a default configuration of 50 ms initial wait period and 8 retries taking + * roughly 5.1 seconds in total. + * + * @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each + * iterator created from it should only be used by a single thread. + */ + public static BackoffPolicy exponentialBackoff() { + return exponentialBackoff(TimeValue.timeValueMillis(50), 8); + } + + /** + * Creates an new exponential backoff policy with the provided configuration. + * + * @param initialDelay The initial delay defines how long to wait for the first retry attempt. Must not be null. + * Must be <= Integer.MAX_VALUE ms. + * @param maxNumberOfRetries The maximum number of retries. 
Must be a non-negative number. + * @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each + * iterator created from it should only be used by a single thread. + */ + public static BackoffPolicy exponentialBackoff(TimeValue initialDelay, int maxNumberOfRetries) { + return new ExponentialBackoff((int) checkDelay(initialDelay).millis(), maxNumberOfRetries); + } + + private static TimeValue checkDelay(TimeValue delay) { + if (delay.millis() > Integer.MAX_VALUE) { + throw new IllegalArgumentException("delay must be <= " + Integer.MAX_VALUE + " ms"); + } + return delay; + } + + private static class NoBackoff extends BackoffPolicy { + @Override + public Iterator iterator() { + return new Iterator() { + @Override + public boolean hasNext() { + return false; + } + + @Override + public TimeValue next() { + throw new NoSuchElementException("No backoff"); + } + }; + } + } + + private static class ExponentialBackoff extends BackoffPolicy { + private final int start; + + private final int numberOfElements; + + private ExponentialBackoff(int start, int numberOfElements) { + assert start >= 0; + assert numberOfElements >= 0; + this.start = start; + this.numberOfElements = numberOfElements; + } + + @Override + public Iterator iterator() { + return new ExponentialBackoffIterator(start, numberOfElements); + } + } + + private static class ExponentialBackoffIterator implements Iterator { + private final int numberOfElements; + + private final int start; + + private int currentlyConsumed; + + private ExponentialBackoffIterator(int start, int numberOfElements) { + this.start = start; + this.numberOfElements = numberOfElements; + } + + @Override + public boolean hasNext() { + return currentlyConsumed < numberOfElements; + } + + @Override + public TimeValue next() { + if (!hasNext()) { + throw new NoSuchElementException("Only up to " + numberOfElements + " elements"); + } + int result = start + 10 * ((int) Math.exp(0.8d 
* (currentlyConsumed)) - 1); + currentlyConsumed++; + return TimeValue.timeValueMillis(result); + } + } + + private static final class ConstantBackoff extends BackoffPolicy { + private final TimeValue delay; + + private final int numberOfElements; + + public ConstantBackoff(TimeValue delay, int numberOfElements) { + assert numberOfElements >= 0; + this.delay = delay; + this.numberOfElements = numberOfElements; + } + + @Override + public Iterator iterator() { + return new ConstantBackoffIterator(delay, numberOfElements); + } + } + + private static final class ConstantBackoffIterator implements Iterator { + private final TimeValue delay; + private final int numberOfElements; + private int curr; + + public ConstantBackoffIterator(TimeValue delay, int numberOfElements) { + this.delay = delay; + this.numberOfElements = numberOfElements; + } + + @Override + public boolean hasNext() { + return curr < numberOfElements; + } + + @Override + public TimeValue next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + curr++; + return delay; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 2a7c185ad8a..316ec7a548e 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -48,7 +47,7 @@ public class BulkProcessor implements Closeable { /** * A listener for the execution. */ - public static interface Listener { + public interface Listener { /** * Callback before the bulk is executed. 
@@ -79,6 +78,7 @@ public class BulkProcessor implements Closeable { private int bulkActions = 1000; private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB); private TimeValue flushInterval = null; + private BackoffPolicy backoffPolicy = BackoffPolicy.noBackoff(); /** * Creates a builder of bulk processor with the client to use and the listener that will be used @@ -136,11 +136,25 @@ public class BulkProcessor implements Closeable { return this; } + /** + * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally + * in case they have failed due to resource constraints (i.e. a thread pool was full). + * + * The default is to not back off, i.e. failing immediately. + */ + public Builder setBackoffPolicy(BackoffPolicy backoffPolicy) { + if (backoffPolicy == null) { + throw new NullPointerException("'backoffPolicy' must not be null. To disable backoff, pass BackoffPolicy.noBackoff()"); + } + this.backoffPolicy = backoffPolicy; + return this; + } + /** * Builds a new bulk processor. 
*/ public BulkProcessor build() { - return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval); + return new BulkProcessor(client, backoffPolicy, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval); } } @@ -152,38 +166,27 @@ public class BulkProcessor implements Closeable { return new Builder(client, listener); } - private final Client client; - private final Listener listener; - - private final String name; - - private final int concurrentRequests; private final int bulkActions; private final long bulkSize; - private final TimeValue flushInterval; - private final Semaphore semaphore; + private final ScheduledThreadPoolExecutor scheduler; private final ScheduledFuture scheduledFuture; private final AtomicLong executionIdGen = new AtomicLong(); private BulkRequest bulkRequest; + private final BulkRequestHandler bulkRequestHandler; private volatile boolean closed = false; - BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) { - this.client = client; - this.listener = listener; - this.name = name; - this.concurrentRequests = concurrentRequests; + BulkProcessor(Client client, BackoffPolicy backoffPolicy, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) { this.bulkActions = bulkActions; this.bulkSize = bulkSize.bytes(); - this.semaphore = new Semaphore(concurrentRequests); this.bulkRequest = new BulkRequest(); + this.bulkRequestHandler = (concurrentRequests == 0) ? 
BulkRequestHandler.syncHandler(client, backoffPolicy, listener) : BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests); - this.flushInterval = flushInterval; if (flushInterval != null) { this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor")); this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); @@ -231,14 +234,7 @@ public class BulkProcessor implements Closeable { if (bulkRequest.numberOfActions() > 0) { execute(); } - if (this.concurrentRequests < 1) { - return true; - } - if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) { - semaphore.release(this.concurrentRequests); - return true; - } - return false; + return this.bulkRequestHandler.awaitClose(timeout, unit); } /** @@ -308,58 +304,7 @@ public class BulkProcessor implements Closeable { final long executionId = executionIdGen.incrementAndGet(); this.bulkRequest = new BulkRequest(); - - if (concurrentRequests == 0) { - // execute in a blocking fashion... 
- boolean afterCalled = false; - try { - listener.beforeBulk(executionId, bulkRequest); - BulkResponse bulkItemResponses = client.bulk(bulkRequest).actionGet(); - afterCalled = true; - listener.afterBulk(executionId, bulkRequest, bulkItemResponses); - } catch (Exception e) { - if (!afterCalled) { - listener.afterBulk(executionId, bulkRequest, e); - } - } - } else { - boolean success = false; - boolean acquired = false; - try { - listener.beforeBulk(executionId, bulkRequest); - semaphore.acquire(); - acquired = true; - client.bulk(bulkRequest, new ActionListener() { - @Override - public void onResponse(BulkResponse response) { - try { - listener.afterBulk(executionId, bulkRequest, response); - } finally { - semaphore.release(); - } - } - - @Override - public void onFailure(Throwable e) { - try { - listener.afterBulk(executionId, bulkRequest, e); - } finally { - semaphore.release(); - } - } - }); - success = true; - } catch (InterruptedException e) { - Thread.interrupted(); - listener.afterBulk(executionId, bulkRequest, e); - } catch (Throwable t) { - listener.afterBulk(executionId, bulkRequest, t); - } finally { - if (!success && acquired) { // if we fail on client.bulk() release the semaphore - semaphore.release(); - } - } - } + this.bulkRequestHandler.execute(bulkRequest, executionId); } private boolean isOverTheLimit() { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java new file mode 100644 index 00000000000..ffc985bd510 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -0,0 +1,160 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; + +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; + +/** + * Abstracts the low-level details of bulk request handling + */ +abstract class BulkRequestHandler { + protected final ESLogger logger; + protected final Client client; + + protected BulkRequestHandler(Client client) { + this.client = client; + this.logger = Loggers.getLogger(getClass(), client.settings()); + } + + + public abstract void execute(BulkRequest bulkRequest, long executionId); + + public abstract boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException; + + + public static BulkRequestHandler syncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) { + return new SyncBulkRequestHandler(client, backoffPolicy, listener); + } + + public static BulkRequestHandler asyncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) { + return new AsyncBulkRequestHandler(client, backoffPolicy, listener, concurrentRequests); + } + + private static class SyncBulkRequestHandler 
extends BulkRequestHandler { + private final BulkProcessor.Listener listener; + private final BackoffPolicy backoffPolicy; + + public SyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) { + super(client); + this.backoffPolicy = backoffPolicy; + this.listener = listener; + } + + @Override + public void execute(BulkRequest bulkRequest, long executionId) { + boolean afterCalled = false; + try { + listener.beforeBulk(executionId, bulkRequest); + BulkResponse bulkResponse = Retry + .on(EsRejectedExecutionException.class) + .policy(backoffPolicy) + .withSyncBackoff(client, bulkRequest); + afterCalled = true; + listener.afterBulk(executionId, bulkRequest, bulkResponse); + } catch (Exception e) { + if (!afterCalled) { + logger.warn("Failed to executed bulk request {}.", e, executionId); + listener.afterBulk(executionId, bulkRequest, e); + } + } + } + + @Override + public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + // we are "closed" immediately as there is no request in flight + return true; + } + } + + private static class AsyncBulkRequestHandler extends BulkRequestHandler { + private final BackoffPolicy backoffPolicy; + private final BulkProcessor.Listener listener; + private final Semaphore semaphore; + private final int concurrentRequests; + + private AsyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) { + super(client); + this.backoffPolicy = backoffPolicy; + assert concurrentRequests > 0; + this.listener = listener; + this.concurrentRequests = concurrentRequests; + this.semaphore = new Semaphore(concurrentRequests); + } + + @Override + public void execute(BulkRequest bulkRequest, long executionId) { + boolean bulkRequestSetupSuccessful = false; + boolean acquired = false; + try { + listener.beforeBulk(executionId, bulkRequest); + semaphore.acquire(); + acquired = true; + 
Retry.on(EsRejectedExecutionException.class) + .policy(backoffPolicy) + .withAsyncBackoff(client, bulkRequest, new ActionListener() { + @Override + public void onResponse(BulkResponse response) { + try { + listener.afterBulk(executionId, bulkRequest, response); + } finally { + semaphore.release(); + } + } + + @Override + public void onFailure(Throwable e) { + try { + listener.afterBulk(executionId, bulkRequest, e); + } finally { + semaphore.release(); + } + } + }); + bulkRequestSetupSuccessful = true; + } catch (InterruptedException e) { + // This is intentionally wrong to avoid changing the behaviour implicitly with this PR. It will be fixed in #14833 + Thread.interrupted(); + listener.afterBulk(executionId, bulkRequest, e); + } catch (Throwable t) { + logger.warn("Failed to executed bulk request {}.", t, executionId); + listener.afterBulk(executionId, bulkRequest, t); + } finally { + if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore + semaphore.release(); + } + } + } + + @Override + public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) { + semaphore.release(this.concurrentRequests); + return true; + } + return false; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java new file mode 100644 index 00000000000..477e61045ba --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -0,0 +1,237 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.*; +import java.util.function.Predicate; + +/** + * Encapsulates synchronous and asynchronous retry logic. + */ +class Retry { + private final Class retryOnThrowable; + + private BackoffPolicy backoffPolicy; + + public static Retry on(Class retryOnThrowable) { + return new Retry(retryOnThrowable); + } + + /** + * @param backoffPolicy The backoff policy that defines how long and how often to wait for retries. + */ + public Retry policy(BackoffPolicy backoffPolicy) { + this.backoffPolicy = backoffPolicy; + return this; + } + + Retry(Class retryOnThrowable) { + this.retryOnThrowable = retryOnThrowable; + } + + /** + * Invokes #bulk(BulkRequest, ActionListener) on the provided client. Backs off on the provided exception and delegates results to the + * provided listener. + * + * @param client Client invoking the bulk request. + * @param bulkRequest The bulk request that should be executed. 
+ * @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not + */ + public void withAsyncBackoff(Client client, BulkRequest bulkRequest, ActionListener listener) { + AsyncRetryHandler r = new AsyncRetryHandler(retryOnThrowable, backoffPolicy, client, listener); + r.execute(bulkRequest); + + } + + /** + * Invokes #bulk(BulkRequest) on the provided client. Backs off on the provided exception. + * + * @param client Client invoking the bulk request. + * @param bulkRequest The bulk request that should be executed. + * @return the bulk response as returned by the client. + * @throws Exception Any exception thrown by the callable. + */ + public BulkResponse withSyncBackoff(Client client, BulkRequest bulkRequest) throws Exception { + return SyncRetryHandler + .create(retryOnThrowable, backoffPolicy, client) + .executeBlocking(bulkRequest) + .actionGet(); + } + + static class AbstractRetryHandler implements ActionListener { + private final ESLogger logger; + private final Client client; + private final ActionListener listener; + private final Iterator backoff; + private final Class retryOnThrowable; + // Access only when holding a client-side lock, see also #addResponses() + private final List responses = new ArrayList<>(); + private final long startTimestampNanos; + // needed to construct the next bulk request based on the response to the previous one + // volatile as we're called from a scheduled thread + private volatile BulkRequest currentBulkRequest; + private volatile ScheduledFuture scheduledRequestFuture; + + public AbstractRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener listener) { + this.retryOnThrowable = retryOnThrowable; + this.backoff = backoffPolicy.iterator(); + this.client = client; + this.listener = listener; + this.logger = Loggers.getLogger(getClass(), client.settings()); + // in contrast to System.currentTimeMillis(), nanoTime() uses a 
monotonic clock under the hood + this.startTimestampNanos = System.nanoTime(); + } + + @Override + public void onResponse(BulkResponse bulkItemResponses) { + if (!bulkItemResponses.hasFailures()) { + // we're done here, include all responses + addResponses(bulkItemResponses, (r -> true)); + finishHim(); + } else { + if (canRetry(bulkItemResponses)) { + addResponses(bulkItemResponses, (r -> !r.isFailed())); + retry(createBulkRequestForRetry(bulkItemResponses)); + } else { + addResponses(bulkItemResponses, (r -> true)); + finishHim(); + } + } + } + + @Override + public void onFailure(Throwable e) { + try { + listener.onFailure(e); + } finally { + FutureUtils.cancel(scheduledRequestFuture); + } + } + + private void retry(BulkRequest bulkRequestForRetry) { + assert backoff.hasNext(); + TimeValue next = backoff.next(); + logger.trace("Retry of bulk request scheduled in {} ms.", next.millis()); + scheduledRequestFuture = client.threadPool().schedule(next, ThreadPool.Names.SAME, (() -> this.execute(bulkRequestForRetry))); + } + + private BulkRequest createBulkRequestForRetry(BulkResponse bulkItemResponses) { + BulkRequest requestToReissue = new BulkRequest(); + int index = 0; + for (BulkItemResponse bulkItemResponse : bulkItemResponses.getItems()) { + if (bulkItemResponse.isFailed()) { + requestToReissue.add(currentBulkRequest.requests().get(index)); + } + index++; + } + return requestToReissue; + } + + private boolean canRetry(BulkResponse bulkItemResponses) { + if (!backoff.hasNext()) { + return false; + } + for (BulkItemResponse bulkItemResponse : bulkItemResponses) { + if (bulkItemResponse.isFailed()) { + Throwable cause = bulkItemResponse.getFailure().getCause(); + Throwable rootCause = ExceptionsHelper.unwrapCause(cause); + if (!rootCause.getClass().equals(retryOnThrowable)) { + return false; + } + } + } + return true; + } + + private void finishHim() { + try { + listener.onResponse(getAccumulatedResponse()); + } finally { + 
FutureUtils.cancel(scheduledRequestFuture); + } + } + + private void addResponses(BulkResponse response, Predicate filter) { + for (BulkItemResponse bulkItemResponse : response) { + if (filter.test(bulkItemResponse)) { + // Use client-side lock here to avoid visibility issues. This method may be called multiple times + // (based on how many retries we have to issue) and relying that the response handling code will be + // scheduled on the same thread is fragile. + synchronized (responses) { + responses.add(bulkItemResponse); + } + } + } + } + + private BulkResponse getAccumulatedResponse() { + BulkItemResponse[] itemResponses; + synchronized (responses) { + itemResponses = responses.toArray(new BulkItemResponse[1]); + } + long stopTimestamp = System.nanoTime(); + long totalLatencyMs = TimeValue.timeValueNanos(stopTimestamp - startTimestampNanos).millis(); + return new BulkResponse(itemResponses, totalLatencyMs); + } + + public void execute(BulkRequest bulkRequest) { + this.currentBulkRequest = bulkRequest; + client.bulk(bulkRequest, this); + } + } + + static class AsyncRetryHandler extends AbstractRetryHandler { + public AsyncRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener listener) { + super(retryOnThrowable, backoffPolicy, client, listener); + } + } + + static class SyncRetryHandler extends AbstractRetryHandler { + private final PlainActionFuture actionFuture; + + public static SyncRetryHandler create(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client) { + PlainActionFuture actionFuture = PlainActionFuture.newFuture(); + return new SyncRetryHandler(retryOnThrowable, backoffPolicy, client, actionFuture); + } + + public SyncRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, PlainActionFuture actionFuture) { + super(retryOnThrowable, backoffPolicy, client, actionFuture); + this.actionFuture = actionFuture; + } + + public ActionFuture executeBlocking(BulkRequest bulkRequest) 
{ + super.execute(bulkRequest); + return actionFuture; + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java new file mode 100644 index 00000000000..3c38e2ef0fa --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java @@ -0,0 +1,164 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matcher; + +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 2) +public class BulkProcessorRetryIT extends ESIntegTestCase { + private static final String INDEX_NAME = "test"; + private static final String TYPE_NAME = "type"; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + //Have very low pool and queue sizes to overwhelm internal pools easily + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("threadpool.generic.size", 1) + .put("threadpool.generic.queue_size", 1) + // don't mess with this one! 
It's quite sensitive to a low queue size + // (see also ThreadedActionListener which is happily spawning threads even when we already got rejected) + //.put("threadpool.listener.queue_size", 1) + .put("threadpool.get.queue_size", 1) + // default is 50 + .put("threadpool.bulk.queue_size", 20) + .build(); + } + + + public void testBulkRejectionLoadWithoutBackoff() throws Throwable { + boolean rejectedExecutionExpected = true; + executeBulkRejectionLoad(BackoffPolicy.noBackoff(), rejectedExecutionExpected); + } + + public void testBulkRejectionLoadWithBackoff() throws Throwable { + boolean rejectedExecutionExpected = false; + executeBulkRejectionLoad(BackoffPolicy.exponentialBackoff(), rejectedExecutionExpected); + } + + private void executeBulkRejectionLoad(BackoffPolicy backoffPolicy, boolean rejectedExecutionExpected) throws Throwable { + int numberOfAsyncOps = randomIntBetween(600, 700); + final CountDownLatch latch = new CountDownLatch(numberOfAsyncOps); + final Set responses = Collections.newSetFromMap(new ConcurrentHashMap<>()); + + assertAcked(prepareCreate(INDEX_NAME)); + ensureGreen(); + + BulkProcessor bulkProcessor = BulkProcessor.builder(client(), new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + // no op + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + responses.add(response); + latch.countDown(); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + responses.add(failure); + latch.countDown(); + } + }).setBulkActions(1) + // zero means that we're in the sync case, more means that we're in the async case + .setConcurrentRequests(randomIntBetween(0, 100)) + .setBackoffPolicy(backoffPolicy) + .build(); + indexDocs(bulkProcessor, numberOfAsyncOps); + latch.await(10, TimeUnit.SECONDS); + bulkProcessor.close(); + + assertThat(responses.size(), equalTo(numberOfAsyncOps)); + + // validate all 
responses + for (Object response : responses) { + if (response instanceof BulkResponse) { + BulkResponse bulkResponse = (BulkResponse) response; + for (BulkItemResponse bulkItemResponse : bulkResponse.getItems()) { + if (bulkItemResponse.isFailed()) { + BulkItemResponse.Failure failure = bulkItemResponse.getFailure(); + Throwable rootCause = ExceptionsHelper.unwrapCause(failure.getCause()); + if (rootCause instanceof EsRejectedExecutionException) { + if (rejectedExecutionExpected == false) { + // we're not expecting that we overwhelmed it even once + throw new AssertionError("Unexpected failure reason", rootCause); + } + } else { + throw new AssertionError("Unexpected failure", rootCause); + } + } + } + } else { + Throwable t = (Throwable) response; + // we're not expecting any other errors + throw new AssertionError("Unexpected failure", t); + } + } + + client().admin().indices().refresh(new RefreshRequest()).get(); + + // validate we did not create any duplicates due to retries + Matcher searchResultCount; + if (rejectedExecutionExpected) { + // it is ok if we lost some index operations to rejected executions + searchResultCount = lessThanOrEqualTo((long) numberOfAsyncOps); + } else { + searchResultCount = equalTo((long) numberOfAsyncOps); + } + + SearchResponse results = client() + .prepareSearch(INDEX_NAME) + .setTypes(TYPE_NAME) + .setQuery(QueryBuilders.matchAllQuery()) + .setSize(0) + .get(); + assertThat(results.getHits().totalHits(), searchResultCount); + } + + private static void indexDocs(BulkProcessor processor, int numDocs) { + for (int i = 1; i <= numDocs; i++) { + processor.add(client() + .prepareIndex() + .setIndex(INDEX_NAME) + .setType(TYPE_NAME) + .setId(Integer.toString(i)) + .setSource("field", randomRealisticUnicodeOfLengthBetween(1, 30)) + .request()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java new file mode 100644 index 
00000000000..e974a6220ed --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -0,0 +1,201 @@ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.rest.NoOpClient; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.CountDownLatch; + +import static org.hamcrest.Matchers.*; + +public class RetryTests extends ESTestCase { + // no need to wait fof a long time in tests + private static final TimeValue DELAY = TimeValue.timeValueMillis(1L); + private static final int CALLS_TO_FAIL = 5; + + private MockBulkClient bulkClient; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + this.bulkClient = new MockBulkClient(getTestName(), CALLS_TO_FAIL); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + this.bulkClient.close(); + } + + private BulkRequest createBulkRequest() { + BulkRequest request = new BulkRequest(); + request.add(new UpdateRequest("shop", "products", "1")); + request.add(new UpdateRequest("shop", "products", "2")); + request.add(new UpdateRequest("shop", "products", "3")); + request.add(new UpdateRequest("shop", "products", "4")); + request.add(new UpdateRequest("shop", "products", "5")); + return request; + } + + public void testSyncRetryBacksOff() throws Exception { + BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL); + + BulkRequest bulkRequest = createBulkRequest(); + BulkResponse response = Retry + .on(EsRejectedExecutionException.class) + 
.policy(backoff) + .withSyncBackoff(bulkClient, bulkRequest); + + assertFalse(response.hasFailures()); + assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions())); + } + + public void testSyncRetryFailsAfterBackoff() throws Exception { + BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1); + + BulkRequest bulkRequest = createBulkRequest(); + BulkResponse response = Retry + .on(EsRejectedExecutionException.class) + .policy(backoff) + .withSyncBackoff(bulkClient, bulkRequest); + + assertTrue(response.hasFailures()); + assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions())); + } + + public void testAsyncRetryBacksOff() throws Exception { + BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL); + AssertingListener listener = new AssertingListener(); + + BulkRequest bulkRequest = createBulkRequest(); + Retry.on(EsRejectedExecutionException.class) + .policy(backoff) + .withAsyncBackoff(bulkClient, bulkRequest, listener); + + listener.awaitCallbacksCalled(); + listener.assertOnResponseCalled(); + listener.assertResponseWithoutFailures(); + listener.assertResponseWithNumberOfItems(bulkRequest.numberOfActions()); + listener.assertOnFailureNeverCalled(); + } + + public void testAsyncRetryFailsAfterBacksOff() throws Exception { + BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1); + AssertingListener listener = new AssertingListener(); + + BulkRequest bulkRequest = createBulkRequest(); + Retry.on(EsRejectedExecutionException.class) + .policy(backoff) + .withAsyncBackoff(bulkClient, bulkRequest, listener); + + listener.awaitCallbacksCalled(); + + listener.assertOnResponseCalled(); + listener.assertResponseWithFailures(); + listener.assertResponseWithNumberOfItems(bulkRequest.numberOfActions()); + listener.assertOnFailureNeverCalled(); + } + + private static class AssertingListener implements ActionListener { + private final CountDownLatch latch; + private 
int countOnResponseCalled = 0; + private Throwable lastFailure; + private BulkResponse response; + + private AssertingListener() { + latch = new CountDownLatch(1); + } + + public void awaitCallbacksCalled() throws InterruptedException { + latch.await(); + } + + @Override + public void onResponse(BulkResponse bulkItemResponses) { + latch.countDown(); + this.response = bulkItemResponses; + countOnResponseCalled++; + } + + @Override + public void onFailure(Throwable e) { + latch.countDown(); + this.lastFailure = e; + } + + public void assertOnResponseCalled() { + assertThat(countOnResponseCalled, equalTo(1)); + } + + public void assertResponseWithNumberOfItems(int numItems) { + assertThat(response.getItems().length, equalTo(numItems)); + } + + public void assertResponseWithoutFailures() { + assertThat(response, notNullValue()); + assertFalse("Response should not have failures", response.hasFailures()); + } + + public void assertResponseWithFailures() { + assertThat(response, notNullValue()); + assertTrue("Response should have failures", response.hasFailures()); + } + + public void assertOnFailureNeverCalled() { + assertThat(lastFailure, nullValue()); + } + } + + private static class MockBulkClient extends NoOpClient { + private int numberOfCallsToFail; + + private MockBulkClient(String testName, int numberOfCallsToFail) { + super(testName); + this.numberOfCallsToFail = numberOfCallsToFail; + } + + @Override + public ActionFuture bulk(BulkRequest request) { + PlainActionFuture responseFuture = new PlainActionFuture<>(); + bulk(request, responseFuture); + return responseFuture; + } + + @Override + public void bulk(BulkRequest request, ActionListener listener) { + // do everything synchronously, that's fine for a test + boolean shouldFail = numberOfCallsToFail > 0; + numberOfCallsToFail--; + + BulkItemResponse[] itemResponses = new BulkItemResponse[request.requests().size()]; + // if we have to fail, we need to fail at least once "reliably", the rest can be random + int 
itemToFail = randomInt(request.requests().size() - 1); + for (int idx = 0; idx < request.requests().size(); idx++) { + if (shouldFail && (randomBoolean() || idx == itemToFail)) { + itemResponses[idx] = failedResponse(); + } else { + itemResponses[idx] = successfulResponse(); + } + } + listener.onResponse(new BulkResponse(itemResponses, 1000L)); + } + + private BulkItemResponse successfulResponse() { + return new BulkItemResponse(1, "update", new DeleteResponse()); + } + + private BulkItemResponse failedResponse() { + return new BulkItemResponse(1, "update", new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full"))); + } + } +} From 028b206a1117b79a16c6e85fdcbf5652f2848fff Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 17 Dec 2015 08:24:52 -0500 Subject: [PATCH 115/167] Simpler using compressed oops flag representation This commit modifies the internal representation of the JVM flag UseCompressedOops to just be a String. This means we can just store the value of the flag or "unknown" directly so that we do not have to engage in shenanigans with three-valued logic around a boxed boolean. 
Relates #15489 --- .../elasticsearch/env/NodeEnvironment.java | 8 ++--- .../elasticsearch/monitor/jvm/JvmInfo.java | 34 +++++++++---------- 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 86b6b704a72..93e95dfaa96 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -296,10 +296,10 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { } private void maybeLogHeapDetails() { - ByteSizeValue maxHeapSize = JvmInfo.jvmInfo().getMem().getHeapMax(); - Boolean usingCompressedOops = JvmInfo.jvmInfo().usingCompressedOops(); - String usingCompressedOopsStatus = usingCompressedOops == null ? "unknown" : Boolean.toString(usingCompressedOops); - logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, usingCompressedOopsStatus); + JvmInfo jvmInfo = JvmInfo.jvmInfo(); + ByteSizeValue maxHeapSize = jvmInfo.getMem().getHeapMax(); + String useCompressedOops = jvmInfo.useCompressedOops(); + logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops); } private static String toString(Collection items) { diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index 8e1fb9ba7db..22f0ed912d0 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -116,11 +116,10 @@ public class JvmInfo implements Streamable, ToXContent { Method vmOptionMethod = clazz.getMethod("getVMOption", String.class); Object useCompressedOopsVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops"); Method valueMethod = vmOptionClazz.getMethod("getValue"); - String value = 
(String)valueMethod.invoke(useCompressedOopsVmOption); - info.usingCompressedOops = Boolean.parseBoolean(value); + info.useCompressedOops = (String)valueMethod.invoke(useCompressedOopsVmOption); } catch (Throwable t) { // unable to deduce the state of compressed oops - // usingCompressedOops will hold its default value of null + info.useCompressedOops = "unknown"; } INSTANCE = info; @@ -157,7 +156,7 @@ public class JvmInfo implements Streamable, ToXContent { String[] gcCollectors = Strings.EMPTY_ARRAY; String[] memoryPools = Strings.EMPTY_ARRAY; - private Boolean usingCompressedOops; + private String useCompressedOops; private JvmInfo() { } @@ -282,8 +281,16 @@ public class JvmInfo implements Streamable, ToXContent { return this.systemProperties; } - public Boolean usingCompressedOops() { - return this.usingCompressedOops; + /** + * The value of the JVM flag UseCompressedOops, if available otherwise + * "unknown". The value "unknown" indicates that an attempt was + * made to obtain the value of the flag on this JVM and the attempt + * failed. + * + * @return the value of the JVM flag UseCompressedOops or "unknown" + */ + public String useCompressedOops() { + return this.useCompressedOops; } @Override @@ -307,7 +314,7 @@ public class JvmInfo implements Streamable, ToXContent { builder.field(Fields.GC_COLLECTORS, gcCollectors); builder.field(Fields.MEMORY_POOLS, memoryPools); - builder.field(Fields.USING_COMPRESSED_OOPS, usingCompressedOops == null ? 
"unknown" : Boolean.toString(usingCompressedOops)); + builder.field(Fields.USING_COMPRESSED_OOPS, useCompressedOops); builder.endObject(); return builder; @@ -368,11 +375,7 @@ public class JvmInfo implements Streamable, ToXContent { mem.readFrom(in); gcCollectors = in.readStringArray(); memoryPools = in.readStringArray(); - if (in.readBoolean()) { - usingCompressedOops = in.readBoolean(); - } else { - usingCompressedOops = null; - } + useCompressedOops = in.readOptionalString(); } @Override @@ -397,12 +400,7 @@ public class JvmInfo implements Streamable, ToXContent { mem.writeTo(out); out.writeStringArray(gcCollectors); out.writeStringArray(memoryPools); - if (usingCompressedOops != null) { - out.writeBoolean(true); - out.writeBoolean(usingCompressedOops); - } else { - out.writeBoolean(false); - } + out.writeOptionalString(useCompressedOops); } public static class Mem implements Streamable { From 207ccc3cd69b920def8488a8d1711eecd7f58760 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Thu, 17 Dec 2015 14:30:31 +0100 Subject: [PATCH 116/167] Add missing header in RetryTests --- .../elasticsearch/action/bulk/RetryTests.java | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index e974a6220ed..8f07b58fe6f 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -1,3 +1,21 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionFuture; From 9149630749ea76d1294f8c3409f57a76a1af6d05 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 16 Dec 2015 18:45:22 -0500 Subject: [PATCH 117/167] Reroute once per batch of shard failures This commit modifies the behavior after publication of a new cluster state to only invoke the reroute logic once per batch of shard failures rather than once per shard failure. --- .../action/shard/ShardStateAction.java | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 3a01ced6ebf..a04a6d7bd51 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -148,20 +148,9 @@ public class ShardStateAction extends AbstractComponent { @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { try { - int numberOfUnassignedShards = newState.getRoutingNodes().unassigned().size(); - if (oldState != newState && numberOfUnassignedShards > 0) { - String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shard [%s]", numberOfUnassignedShards, request.shardRouting); - if (logger.isTraceEnabled()) { - logger.trace(reason + ", scheduling a reroute"); - } - routingService.reroute(reason); - } - 
} finally { - try { - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } catch (Throwable channelThrowable) { - logger.warn("failed to send response while failing shard [{}]", channelThrowable, request.shardRouting); - } + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } catch (Throwable channelThrowable) { + logger.warn("failed to send response while failing shard [{}]", channelThrowable, request.shardRouting); } } } @@ -189,6 +178,18 @@ public class ShardStateAction extends AbstractComponent { } return batchResultBuilder.build(maybeUpdatedState); } + + @Override + public void clusterStatePublished(ClusterState newClusterState) { + int numberOfUnassignedShards = newClusterState.getRoutingNodes().unassigned().size(); + if (numberOfUnassignedShards > 0) { + String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards); + if (logger.isTraceEnabled()) { + logger.trace(reason + ", scheduling a reroute"); + } + routingService.reroute(reason); + } + } } private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler(); From 8543d7795e6982b1a4481bb2498b3a358c3e8683 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 17 Dec 2015 15:12:16 +0100 Subject: [PATCH 118/167] Convert several more settings --- .../org/elasticsearch/common/settings/ClusterSettings.java | 4 +++- .../java/org/elasticsearch/common/settings/Setting.java | 5 ++++- .../java/org/elasticsearch/common/settings/Settings.java | 6 +++++- .../main/java/org/elasticsearch/transport/Transport.java | 4 ++++ .../java/org/elasticsearch/transport/TransportService.java | 6 ++++-- .../org/elasticsearch/transport/netty/NettyTransport.java | 2 +- .../elasticsearch/gateway/ReusePeerRecoverySharedTest.java | 4 ++-- 7 files changed, 23 insertions(+), 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java 
b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 4e922ca28e3..ac976268a0c 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -36,6 +36,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.ttl.IndicesTTLService; import org.elasticsearch.search.SearchService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import java.util.*; @@ -134,5 +135,6 @@ public final class ClusterSettings extends AbstractScopedSettings { ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, - HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING))); + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, + Transport.TRANSPORT_PROFILES_SETTING))); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 42f77a18c9c..236df5c567b 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -312,6 +312,9 @@ public class Setting extends ToXContentToBytes { } public static Setting> listSetting(String key, List defaultStringValue, Function singleValueParser, boolean dynamic, Scope scope) { + return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope); + } + public static Setting> listSetting(String key, Function> defaultStringValue, Function singleValueParser, boolean dynamic, Scope scope) { Function> parser = (s) -> { try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(s)){ 
XContentParser.Token token = xContentParser.nextToken(); @@ -330,7 +333,7 @@ public class Setting extends ToXContentToBytes { throw new IllegalArgumentException("failed to parse array", e); } }; - return new Setting>(key, arrayToParsableString(defaultStringValue.toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) { + return new Setting>(key, (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) { private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?"); @Override public String getRaw(Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index f8dd5d4f1f6..989b05d4bf2 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -652,7 +652,11 @@ public final class Settings implements ToXContent { * Returns group settings for the given setting prefix. 
*/ public Map getAsGroups() throws SettingsException { - return getGroupsInternal("", false); + return getAsGroups(false); + } + + public Map getAsGroups(boolean ignoreNonGrouped) throws SettingsException { + return getGroupsInternal("", ignoreNonGrouped); } /** diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java index 10fa9b239dc..8e4b7a47b1b 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transport.java +++ b/core/src/main/java/org/elasticsearch/transport/Transport.java @@ -21,6 +21,8 @@ package org.elasticsearch.transport; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -34,6 +36,8 @@ import java.util.Map; public interface Transport extends LifecycleComponent { + Setting TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", false, Setting.Scope.CLUSTER); + public static class TransportSettings { public static final String TRANSPORT_TCP_COMPRESS = "transport.tcp.compress"; } diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 916c4863e07..444f52b9c03 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -47,6 +47,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; import java.util.function.Supplier; import static 
org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; @@ -84,8 +85,9 @@ public class TransportService extends AbstractLifecycleComponent> TRACE_LOG_INCLUDE_SETTING = Setting.listSetting("transport.tracer.include", Collections.emptyList(), (s) -> s, true, Setting.Scope.CLUSTER); - public static final Setting> TRACE_LOG_EXCLUDE_SETTING = Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), (s) -> s, true, Setting.Scope.CLUSTER); + public static final Setting> TRACE_LOG_INCLUDE_SETTING = Setting.listSetting("transport.tracer.include", Collections.emptyList(), Function.identity(), true, Setting.Scope.CLUSTER); + public static final Setting> TRACE_LOG_EXCLUDE_SETTING = Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), true, Setting.Scope.CLUSTER); + private final ESLogger tracerLog; diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index 2f1c52a0ac2..37f1dc4fa0b 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -295,7 +295,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem this.serverOpenChannels = openChannels; // extract default profile first and create standard bootstrap - Map profiles = settings.getGroups("transport.profiles", true); + Map profiles = TRANSPORT_PROFILES_SETTING.get(settings()).getAsGroups(true); if (!profiles.containsKey(DEFAULT_PROFILE)) { profiles = new HashMap<>(profiles); profiles.put(DEFAULT_PROFILE, Settings.EMPTY); diff --git a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java index 5e16fcebe86..936a6fa09a0 100644 
--- a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java +++ b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java @@ -90,7 +90,7 @@ public class ReusePeerRecoverySharedTest { // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE)).get(); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)).get(); logger.info("--> full cluster restart"); restartCluster.run(); @@ -105,7 +105,7 @@ public class ReusePeerRecoverySharedTest { logger.info("--> disabling allocation while the cluster is shut down", useSyncIds ? "" : " a second time"); // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings().setTransientSettings( - settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE)) + settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)) .get(); logger.info("--> full cluster restart"); restartCluster.run(); From 3f87b0809eb52de81eaf0a547d28c8de2fe587ce Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Thu, 17 Dec 2015 15:22:59 +0100 Subject: [PATCH 119/167] Fix visibility issue in RetryTests --- .../test/java/org/elasticsearch/action/bulk/RetryTests.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 8f07b58fe6f..5728f7b54bc 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -126,9 +126,9 @@ public class RetryTests 
extends ESTestCase { private static class AssertingListener implements ActionListener { private final CountDownLatch latch; - private int countOnResponseCalled = 0; - private Throwable lastFailure; - private BulkResponse response; + private volatile int countOnResponseCalled = 0; + private volatile Throwable lastFailure; + private volatile BulkResponse response; private AssertingListener() { latch = new CountDownLatch(1); From b71845bf9b7f16486c5445f5c2d0b362405ecba0 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 17 Dec 2015 15:10:25 +0100 Subject: [PATCH 120/167] Remove the unused mergeScheduleFuture from IndexShard. Closes #15512 --- .../elasticsearch/index/shard/IndexShard.java | 34 +++++++++++++++---- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index c0bf9244673..3c0e1c64105 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -19,7 +19,11 @@ package org.elasticsearch.index.shard; -import org.apache.lucene.index.*; +import org.apache.lucene.index.CheckIndex; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy; +import org.apache.lucene.index.SnapshotDeletionPolicy; +import org.apache.lucene.index.Term; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.store.AlreadyClosedException; @@ -61,7 +65,16 @@ import org.elasticsearch.index.cache.bitset.ShardBitsetFilterCache; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.cache.request.ShardRequestCache; import org.elasticsearch.index.codec.CodecService; -import org.elasticsearch.index.engine.*; +import org.elasticsearch.index.engine.CommitStats; +import 
org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineClosedException; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.InternalEngineFactory; +import org.elasticsearch.index.engine.RefreshFailedEngineException; +import org.elasticsearch.index.engine.Segment; +import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.ShardFieldData; @@ -70,7 +83,12 @@ import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.ShardGetService; import org.elasticsearch.index.indexing.IndexingStats; import org.elasticsearch.index.indexing.ShardIndexingService; -import org.elasticsearch.index.mapper.*; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.DocumentMapperForType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.percolator.PercolateStats; import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; @@ -108,7 +126,12 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.channels.ClosedByInterruptException; import java.nio.charset.StandardCharsets; -import java.util.*; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; @@ -151,7 +174,6 @@ public class IndexShard extends 
AbstractIndexShardComponent { private TimeValue refreshInterval; private volatile ScheduledFuture refreshScheduledFuture; - private volatile ScheduledFuture mergeScheduleFuture; protected volatile ShardRouting shardRouting; protected volatile IndexShardState state; protected final AtomicReference currentEngineReference = new AtomicReference<>(); @@ -766,8 +788,6 @@ public class IndexShard extends AbstractIndexShardComponent { if (state != IndexShardState.CLOSED) { FutureUtils.cancel(refreshScheduledFuture); refreshScheduledFuture = null; - FutureUtils.cancel(mergeScheduleFuture); - mergeScheduleFuture = null; } changeState(IndexShardState.CLOSED, reason); indexShardOperationCounter.decRef(); From 3a442db9bd4843bb2b12bad9279ecb35f05315cc Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Mon, 7 Dec 2015 15:09:45 +0100 Subject: [PATCH 121/167] Allocate primary shards based on allocation ids Closes #15281 --- .../shards/IndicesShardStoresResponse.java | 55 ++- .../TransportIndicesShardStoresAction.java | 16 +- .../cluster/metadata/IndexMetaData.java | 2 +- .../metadata/MetaDataIndexStateService.java | 9 - .../cluster/routing/ShardRouting.java | 9 +- .../decider/DiskThresholdDecider.java | 5 +- .../decider/EnableAllocationDecider.java | 7 +- .../gateway/PrimaryShardAllocator.java | 196 +++++++--- .../gateway/ReplicaShardAllocator.java | 11 +- ...ransportNodesListGatewayStartedShards.java | 23 +- .../elasticsearch/index/shard/IndexShard.java | 3 +- .../admin/indices/create/CreateIndexIT.java | 7 + .../shards/IndicesShardStoreRequestIT.java | 3 +- .../IndicesShardStoreResponseTests.java | 21 +- .../cluster/routing/PrimaryAllocationIT.java | 103 +++++ .../gateway/PrimaryShardAllocatorTests.java | 358 ++++++++++++------ .../gateway/QuorumGatewayIT.java | 74 +--- .../gateway/ReplicaShardAllocatorTests.java | 18 +- .../index/shard/IndexShardTests.java | 8 +- .../indices/state/SimpleIndexStateIT.java | 11 +- docs/reference/index-modules.asciidoc | 13 - 
.../indices/shadow-replicas.asciidoc | 5 +- docs/reference/indices/shard-stores.asciidoc | 8 +- docs/reference/migration/migrate_3_0.asciidoc | 21 + .../cluster/shards_allocation.asciidoc | 5 +- .../test/ESAllocationTestCase.java | 4 +- 26 files changed, 645 insertions(+), 350 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index 84b39d4c689..380f6e00890 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -56,13 +56,14 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon public static class StoreStatus implements Streamable, ToXContent, Comparable { private DiscoveryNode node; private long version; + private String allocationId; private Throwable storeException; - private Allocation allocation; + private AllocationStatus allocationStatus; /** * The status of the shard store with respect to the cluster */ - public enum Allocation { + public enum AllocationStatus { /** * Allocated as primary @@ -81,16 +82,16 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon private final byte id; - Allocation(byte id) { + AllocationStatus(byte id) { this.id = id; } - private static Allocation fromId(byte id) { + private static AllocationStatus fromId(byte id) { switch (id) { case 0: return PRIMARY; case 1: return REPLICA; case 2: return UNUSED; - default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]"); + default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]"); } } @@ -99,11 +100,11 @@ public class IndicesShardStoresResponse 
extends ActionResponse implements ToXCon case 0: return "primary"; case 1: return "replica"; case 2: return "unused"; - default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]"); + default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]"); } } - private static Allocation readFrom(StreamInput in) throws IOException { + private static AllocationStatus readFrom(StreamInput in) throws IOException { return fromId(in.readByte()); } @@ -115,10 +116,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon private StoreStatus() { } - public StoreStatus(DiscoveryNode node, long version, Allocation allocation, Throwable storeException) { + public StoreStatus(DiscoveryNode node, long version, String allocationId, AllocationStatus allocationStatus, Throwable storeException) { this.node = node; this.version = version; - this.allocation = allocation; + this.allocationId = allocationId; + this.allocationStatus = allocationStatus; this.storeException = storeException; } @@ -130,13 +132,20 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } /** - * Version of the store, used to select the store that will be - * used as a primary. + * Version of the store */ public long getVersion() { return version; } + /** + * AllocationStatus id of the store, used to select the store that will be + * used as a primary. + */ + public String getAllocationId() { + return allocationId; + } + /** * Exception while trying to open the * shard index or from when the shard failed @@ -146,13 +155,13 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } /** - * The allocation status of the store. - * {@link Allocation#PRIMARY} indicates a primary shard copy - * {@link Allocation#REPLICA} indicates a replica shard copy - * {@link Allocation#UNUSED} indicates an unused shard copy + * The allocationStatus status of the store. 
+ * {@link AllocationStatus#PRIMARY} indicates a primary shard copy + * {@link AllocationStatus#REPLICA} indicates a replica shard copy + * {@link AllocationStatus#UNUSED} indicates an unused shard copy */ - public Allocation getAllocation() { - return allocation; + public AllocationStatus getAllocationStatus() { + return allocationStatus; } static StoreStatus readStoreStatus(StreamInput in) throws IOException { @@ -165,7 +174,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon public void readFrom(StreamInput in) throws IOException { node = DiscoveryNode.readNode(in); version = in.readLong(); - allocation = Allocation.readFrom(in); + allocationId = in.readOptionalString(); + allocationStatus = AllocationStatus.readFrom(in); if (in.readBoolean()) { storeException = in.readThrowable(); } @@ -175,7 +185,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon public void writeTo(StreamOutput out) throws IOException { node.writeTo(out); out.writeLong(version); - allocation.writeTo(out); + out.writeOptionalString(allocationId); + allocationStatus.writeTo(out); if (storeException != null) { out.writeBoolean(true); out.writeThrowable(storeException); @@ -188,7 +199,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { node.toXContent(builder, params); builder.field(Fields.VERSION, version); - builder.field(Fields.ALLOCATED, allocation.value()); + builder.field(Fields.ALLOCATION_ID, allocationId); + builder.field(Fields.ALLOCATED, allocationStatus.value()); if (storeException != null) { builder.startObject(Fields.STORE_EXCEPTION); ElasticsearchException.toXContent(builder, params, storeException); @@ -206,7 +218,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } else { int compare = Long.compare(other.version, version); if (compare == 0) { - return 
Integer.compare(allocation.id, other.allocation.id); + return Integer.compare(allocationStatus.id, other.allocationStatus.id); } return compare; } @@ -379,6 +391,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon static final XContentBuilderString STORES = new XContentBuilderString("stores"); // StoreStatus fields static final XContentBuilderString VERSION = new XContentBuilderString("version"); + static final XContentBuilderString ALLOCATION_ID = new XContentBuilderString("allocation_id"); static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception"); static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation"); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 336ebc254b4..d345c0e7d45 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -179,8 +179,8 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc } for (NodeGatewayStartedShards response : fetchResponse.responses) { if (shardExistsInNode(response)) { - IndicesShardStoresResponse.StoreStatus.Allocation allocation = getAllocation(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode()); - storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), allocation, response.storeException())); + IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode()); + storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), 
response.allocationId(), allocationStatus, response.storeException())); } } CollectionUtil.timSort(storeStatuses); @@ -193,27 +193,27 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder))); } - private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) { + private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) { for (ShardRouting shardRouting : routingNodes.node(node.id())) { ShardId shardId = shardRouting.shardId(); if (shardId.id() == shardID && shardId.getIndex().equals(index)) { if (shardRouting.primary()) { - return IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY; } else if (shardRouting.assignedToNode()) { - return IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA; } else { - return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED; } } } - return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED; } /** * A shard exists/existed in a node only if shard state file exists in the node */ private boolean shardExistsInNode(final NodeGatewayStartedShards response) { - return response.storeException() != null || response.version() != -1; + return response.storeException() != null || response.version() != -1 || response.allocationId() != null; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 669d71477ca..93961bf1fbb 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -621,7 +621,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild public int numberOfReplicas() { return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1); } - + public Builder creationDate(long creationDate) { settings = settingsBuilder().put(settings).put(SETTING_CREATION_DATE, creationDate).build(); return this; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 1fa1b702f66..b38e99d4493 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -47,7 +47,6 @@ import org.elasticsearch.rest.RestStatus; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Locale; /** * Service responsible for submitting open/close index requests @@ -92,14 +91,6 @@ public class MetaDataIndexStateService extends AbstractComponent { } if (indexMetaData.getState() != IndexMetaData.State.CLOSE) { - IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index); - for (IndexShardRoutingTable shard : indexRoutingTable) { - for (ShardRouting shardRouting : shard) { - if (shardRouting.primary() == true && shardRouting.allocatedPostIndexCreate() == false) { - throw new IndexPrimaryShardNotAllocatedException(new Index(index)); - } - } - } indicesToClose.add(index); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 8dd71e3fba5..5ffaee0f2f9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ 
b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -267,7 +269,7 @@ public final class ShardRouting implements Streamable, ToXContent { return shardIdentifier; } - public boolean allocatedPostIndexCreate() { + public boolean allocatedPostIndexCreate(IndexMetaData indexMetaData) { if (active()) { return true; } @@ -279,6 +281,11 @@ public final class ShardRouting implements Streamable, ToXContent { return false; } + if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_3_0_0)) { + // when no shards with this id have ever been active for this index + return false; + } + return true; } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index a02c72c5745..0df4959a5c5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -22,13 +22,13 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.carrotsearch.hppc.ObjectLookupContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.EmptyClusterInfoService; +import 
org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; @@ -360,7 +360,8 @@ public class DiskThresholdDecider extends AllocationDecider { } // a flag for whether the primary shard has been previously allocated - boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(); + IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex()); + boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData); // checks for exact byte comparisons if (freeBytes < freeBytesThresholdLow.bytes()) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 0bbd4935044..a34cd33f7a1 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -82,8 +83,8 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored"); } - Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings(); - String enableIndexValue = indexSettings.get(INDEX_ROUTING_ALLOCATION_ENABLE); + IndexMetaData indexMetaData = 
allocation.metaData().index(shardRouting.getIndex()); + String enableIndexValue = indexMetaData.getSettings().get(INDEX_ROUTING_ALLOCATION_ENABLE); final Allocation enable; if (enableIndexValue != null) { enable = Allocation.parse(enableIndexValue); @@ -96,7 +97,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe case NONE: return allocation.decision(Decision.NO, NAME, "no allocations are allowed"); case NEW_PRIMARIES: - if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate() == false) { + if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData) == false) { return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed"); } else { return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden"); diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index e560b4458b7..79bfbdac8c2 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -20,6 +20,7 @@ package org.elasticsearch.gateway; import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -30,8 +31,10 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; import java.util.*; +import java.util.stream.Collectors; /** * The primary shard allocator allocates primary shard that were not created as @@ -39,6 +42,7 @@ import java.util.*; */ public abstract class 
PrimaryShardAllocator extends AbstractComponent { + @Deprecated public static final String INDEX_RECOVERY_INITIAL_SHARDS = "index.recovery.initial_shards"; private final String initialShards; @@ -56,13 +60,21 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); while (unassignedIterator.hasNext()) { - ShardRouting shard = unassignedIterator.next(); + final ShardRouting shard = unassignedIterator.next(); - if (needToFindPrimaryCopy(shard) == false) { + if (shard.primary() == false) { continue; } - AsyncShardFetch.FetchResult shardState = fetchData(shard, allocation); + final IndexMetaData indexMetaData = metaData.index(shard.getIndex()); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings, Collections.emptyList()); + + if (shard.allocatedPostIndexCreate(indexMetaData) == false) { + // when we create a fresh index + continue; + } + + final AsyncShardFetch.FetchResult shardState = fetchData(shard, allocation); if (shardState.hasData() == false) { logger.trace("{}: ignoring allocation, still fetching shard started state", shard); allocation.setHasPendingAsyncFetch(); @@ -70,25 +82,50 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { continue; } - IndexMetaData indexMetaData = metaData.index(shard.getIndex()); - Settings indexSettings = Settings.builder().put(settings).put(indexMetaData.getSettings()).build(); + final Set lastActiveAllocationIds = indexMetaData.activeAllocationIds(shard.id()); + final boolean snapshotRestore = shard.restoreSource() != null; + final boolean recoverOnAnyNode = recoverOnAnyNode(indexSettings); - NodesAndVersions nodesAndVersions = buildNodesAndVersions(shard, recoverOnAnyNode(indexSettings), allocation.getIgnoreNodes(shard.shardId()), shardState); - logger.debug("[{}][{}] found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), 
nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion); + final NodesAndVersions nodesAndVersions; + final boolean enoughAllocationsFound; - if (isEnoughAllocationsFound(shard, indexMetaData, nodesAndVersions) == false) { - // if we are restoring this shard we still can allocate - if (shard.restoreSource() == null) { + if (lastActiveAllocationIds.isEmpty()) { + assert indexSettings.getIndexVersionCreated().before(Version.V_3_0_0) : "trying to allocated a primary with an empty allocation id set, but index is new"; + // when we load an old index (after upgrading cluster) or restore a snapshot of an old index + // fall back to old version-based allocation mode + // Note that once the shard has been active, lastActiveAllocationIds will be non-empty + nodesAndVersions = buildNodesAndVersions(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState); + if (snapshotRestore || recoverOnAnyNode) { + enoughAllocationsFound = nodesAndVersions.allocationsFound > 0; + } else { + enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(shard, indexMetaData, nodesAndVersions); + } + logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), Version.V_3_0_0, nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion); + } else { + assert lastActiveAllocationIds.isEmpty() == false; + // use allocation ids to select nodes + nodesAndVersions = buildAllocationIdBasedNodes(shard, snapshotRestore || recoverOnAnyNode, + allocation.getIgnoreNodes(shard.shardId()), lastActiveAllocationIds, shardState); + enoughAllocationsFound = nodesAndVersions.allocationsFound > 0; + logger.debug("[{}][{}]: found {} allocations of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, lastActiveAllocationIds); + } + + if (enoughAllocationsFound == false){ + if (snapshotRestore) { + // let 
BalancedShardsAllocator take care of allocating this shard + logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource()); + } else if (recoverOnAnyNode) { + // let BalancedShardsAllocator take care of allocating this shard + logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id()); + } else { // we can't really allocate, so ignore it and continue unassignedIterator.removeAndIgnore(); logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound); - } else { - logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource()); } continue; } - NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions); + final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions.nodes); if (nodesToAllocate.yesNodes.isEmpty() == false) { DiscoveryNode node = nodesToAllocate.yesNodes.get(0); logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node); @@ -109,63 +146,99 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } /** - * Does the shard need to find a primary copy? + * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have an allocation id matching + * lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but + * entries with matching allocation id are always at the front of the list. 
*/ - boolean needToFindPrimaryCopy(ShardRouting shard) { - if (shard.primary() == false) { - return false; + protected NodesAndVersions buildAllocationIdBasedNodes(ShardRouting shard, boolean matchAnyShard, Set ignoreNodes, + Set lastActiveAllocationIds, AsyncShardFetch.FetchResult shardState) { + List matchingNodes = new ArrayList<>(); + List nonMatchingNodes = new ArrayList<>(); + long highestVersion = -1; + for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) { + DiscoveryNode node = nodeShardState.getNode(); + String allocationId = nodeShardState.allocationId(); + + if (ignoreNodes.contains(node.id())) { + continue; + } + + if (nodeShardState.storeException() == null) { + if (allocationId == null && nodeShardState.version() != -1) { + // old shard with no allocation id, assign dummy value so that it gets added below in case of matchAnyShard + allocationId = "_n/a_"; + } + + logger.trace("[{}] on node [{}] has allocation id [{}] of shard", shard, nodeShardState.getNode(), allocationId); + } else { + logger.trace("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", nodeShardState.storeException(), shard, nodeShardState.getNode(), allocationId); + allocationId = null; + } + + if (allocationId != null) { + if (lastActiveAllocationIds.contains(allocationId)) { + matchingNodes.add(node); + highestVersion = Math.max(highestVersion, nodeShardState.version()); + } else if (matchAnyShard) { + nonMatchingNodes.add(node); + highestVersion = Math.max(highestVersion, nodeShardState.version()); + } + } } - // this is an API allocation, ignore since we know there is no data... 
- if (shard.allocatedPostIndexCreate() == false) { - return false; - } + List nodes = new ArrayList<>(); + nodes.addAll(matchingNodes); + nodes.addAll(nonMatchingNodes); - return true; + if (logger.isTraceEnabled()) { + logger.trace("{} candidates for allocation: {}", shard, nodes.stream().map(DiscoveryNode::name).collect(Collectors.joining(", "))); + } + return new NodesAndVersions(nodes, nodes.size(), highestVersion); } - private boolean isEnoughAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) { + /** + * used by old version-based allocation + */ + private boolean isEnoughVersionBasedAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) { // check if the counts meets the minimum set int requiredAllocation = 1; // if we restore from a repository one copy is more then enough - if (shard.restoreSource() == null) { - try { - String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards)); - if ("quorum".equals(initialShards)) { - if (indexMetaData.getNumberOfReplicas() > 1) { - requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1; - } - } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) { - if (indexMetaData.getNumberOfReplicas() > 2) { - requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2); - } - } else if ("one".equals(initialShards)) { - requiredAllocation = 1; - } else if ("full".equals(initialShards) || "all".equals(initialShards)) { - requiredAllocation = indexMetaData.getNumberOfReplicas() + 1; - } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) { - if (indexMetaData.getNumberOfReplicas() > 1) { - requiredAllocation = indexMetaData.getNumberOfReplicas(); - } - } else { - requiredAllocation = Integer.parseInt(initialShards); + try { + String initialShards = 
indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards)); + if ("quorum".equals(initialShards)) { + if (indexMetaData.getNumberOfReplicas() > 1) { + requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1; } - } catch (Exception e) { - logger.warn("[{}][{}] failed to derived initial_shards from value {}, ignore allocation for {}", shard.index(), shard.id(), initialShards, shard); + } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) { + if (indexMetaData.getNumberOfReplicas() > 2) { + requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2); + } + } else if ("one".equals(initialShards)) { + requiredAllocation = 1; + } else if ("full".equals(initialShards) || "all".equals(initialShards)) { + requiredAllocation = indexMetaData.getNumberOfReplicas() + 1; + } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) { + if (indexMetaData.getNumberOfReplicas() > 1) { + requiredAllocation = indexMetaData.getNumberOfReplicas(); + } + } else { + requiredAllocation = Integer.parseInt(initialShards); } + } catch (Exception e) { + logger.warn("[{}][{}] failed to derived initial_shards from value {}, ignore allocation for {}", shard.index(), shard.id(), initialShards, shard); } return nodesAndVersions.allocationsFound >= requiredAllocation; } /** - * Based on the nodes and versions, build the list of yes/no/throttle nodes that the shard applies to. 
+ * Split the list of nodes to lists of yes/no/throttle nodes based on allocation deciders */ - private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, NodesAndVersions nodesAndVersions) { + private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, List nodes) { List yesNodes = new ArrayList<>(); List throttledNodes = new ArrayList<>(); List noNodes = new ArrayList<>(); - for (DiscoveryNode discoNode : nodesAndVersions.nodes) { + for (DiscoveryNode discoNode : nodes) { RoutingNode node = allocation.routingNodes().node(discoNode.id()); if (node == null) { continue; @@ -184,9 +257,11 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } /** - * Builds a list of nodes and version + * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have the highest shard version + * are added to the list. Otherwise, any node that has a shard is added to the list, but entries with highest + * version are always at the front of the list. 
*/ - NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean recoveryOnAnyNode, Set ignoreNodes, + NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean matchAnyShard, Set ignoreNodes, AsyncShardFetch.FetchResult shardState) { final Map nodesWithVersion = new HashMap<>(); int numberOfAllocationsFound = 0; @@ -208,20 +283,15 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { version = -1; } - if (recoveryOnAnyNode) { - numberOfAllocationsFound++; - if (version > highestVersion) { - highestVersion = version; - } - // We always put the node without clearing the map - nodesWithVersion.put(node, version); - } else if (version != -1) { + if (version != -1) { numberOfAllocationsFound++; // If we've found a new "best" candidate, clear the // current candidates and add it if (version > highestVersion) { highestVersion = version; - nodesWithVersion.clear(); + if (matchAnyShard == false) { + nodesWithVersion.clear(); + } nodesWithVersion.put(node, version); } else if (version == highestVersion) { // If the candidate is the same, add it to the @@ -258,9 +328,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { * Return {@code true} if the index is configured to allow shards to be * recovered on any node */ - private boolean recoverOnAnyNode(Settings idxSettings) { - return IndexMetaData.isOnSharedFilesystem(idxSettings) && - idxSettings.getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false); + private boolean recoverOnAnyNode(IndexSettings indexSettings) { + return indexSettings.isOnSharedFilesystem() + && indexSettings.getSettings().getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false); } protected abstract AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java 
index c87f4d94755..0b5f2bc58d9 100644 --- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -24,6 +24,8 @@ import com.carrotsearch.hppc.ObjectLongMap; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; @@ -56,6 +58,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { */ public boolean processExistingRecoveries(RoutingAllocation allocation) { boolean changed = false; + MetaData metaData = allocation.metaData(); for (RoutingNodes.RoutingNodesIterator nodes = allocation.routingNodes().nodes(); nodes.hasNext(); ) { nodes.next(); for (RoutingNodes.RoutingNodeIterator it = nodes.nodeShards(); it.hasNext(); ) { @@ -69,8 +72,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { if (shard.relocatingNodeId() != null) { continue; } + // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... 
- if (shard.allocatedPostIndexCreate() == false) { + IndexMetaData indexMetaData = metaData.index(shard.getIndex()); + if (shard.allocatedPostIndexCreate(indexMetaData) == false) { continue; } @@ -114,6 +119,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { boolean changed = false; final RoutingNodes routingNodes = allocation.routingNodes(); final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); + MetaData metaData = allocation.metaData(); while (unassignedIterator.hasNext()) { ShardRouting shard = unassignedIterator.next(); if (shard.primary()) { @@ -121,7 +127,8 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { } // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... - if (shard.allocatedPostIndexCreate() == false) { + IndexMetaData indexMetaData = metaData.index(shard.getIndex()); + if (shard.allocatedPostIndexCreate(indexMetaData) == false) { continue; } diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index d91b4bd8cdd..539ac924262 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -139,7 +139,8 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction Store.tryOpenIndex(shardPath.resolveIndex()); } catch (Exception exception) { logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId, shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : ""); - return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, exception); + String allocationId = shardStateMetaData.allocationId != null ? 
shardStateMetaData.allocationId.getId() : null; + return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId, exception); } } // old shard metadata doesn't have the actual index UUID so we need to check if the actual uuid in the metadata @@ -149,11 +150,12 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction logger.warn("{} shard state info found but indexUUID didn't match expected [{}] actual [{}]", shardId, indexUUID, shardStateMetaData.indexUUID); } else { logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData); - return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version); + String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null; + return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId); } } logger.trace("{} no local shard info found", shardId); - return new NodeGatewayStartedShards(clusterService.localNode(), -1); + return new NodeGatewayStartedShards(clusterService.localNode(), -1, null); } catch (Exception e) { throw new ElasticsearchException("failed to load started shards", e); } @@ -277,17 +279,19 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction public static class NodeGatewayStartedShards extends BaseNodeResponse { private long version = -1; + private String allocationId = null; private Throwable storeException = null; public NodeGatewayStartedShards() { } - public NodeGatewayStartedShards(DiscoveryNode node, long version) { - this(node, version, null); + public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId) { + this(node, version, allocationId, null); } - public NodeGatewayStartedShards(DiscoveryNode node, long version, Throwable storeException) { + public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId, Throwable 
storeException) { super(node); this.version = version; + this.allocationId = allocationId; this.storeException = storeException; } @@ -295,6 +299,10 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction return this.version; } + public String allocationId() { + return this.allocationId; + } + public Throwable storeException() { return this.storeException; } @@ -303,16 +311,17 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction public void readFrom(StreamInput in) throws IOException { super.readFrom(in); version = in.readLong(); + allocationId = in.readOptionalString(); if (in.readBoolean()) { storeException = in.readThrowable(); } - } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeLong(version); + out.writeOptionalString(allocationId); if (storeException != null) { out.writeBoolean(true); out.writeThrowable(storeException); diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index c0bf9244673..03c0611d172 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1099,7 +1099,8 @@ public class IndexShard extends AbstractIndexShardComponent { // we are the first primary, recover from the gateway // if its post api allocation, the index should exists assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard"; - final boolean shouldExist = shardRouting.allocatedPostIndexCreate(); + boolean shouldExist = shardRouting.allocatedPostIndexCreate(idxSettings.getIndexMetaData()); + StoreRecovery storeRecovery = new StoreRecovery(shardId, logger); return storeRecovery.recoverFromStore(this, shouldExist, localNode); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java 
b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index bb154218215..3ce9e99f4dc 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -285,4 +285,11 @@ public class CreateIndexIT extends ESIntegTestCase { assertThat(messages.toString(), containsString("mapper [text] is used by multiple types")); } } + + public void testRestartIndexCreationAfterFullClusterRestart() throws Exception { + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none")).get(); + client().admin().indices().prepareCreate("test").setSettings(indexSettings()).get(); + internalCluster().fullRestart(); + ensureGreen("test"); + } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index ffb9e630b70..ebd32ccb482 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -87,6 +87,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { for (ObjectCursor> shardStoreStatuses : shardStores.values()) { for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses.value) { assertThat(storeStatus.getVersion(), greaterThan(-1l)); + assertThat(storeStatus.getAllocationId(), notNullValue()); assertThat(storeStatus.getNode(), notNullValue()); assertThat(storeStatus.getStoreException(), nullValue()); } @@ -108,7 +109,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { assertThat(shardStoresStatuses.size(), equalTo(unassignedShards.size())); for (IntObjectCursor> storesStatus : shardStoresStatuses) { 
assertThat("must report for one store", storesStatus.value.size(), equalTo(1)); - assertThat("reported store should be primary", storesStatus.value.get(0).getAllocation(), equalTo(IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY)); + assertThat("reported store should be primary", storesStatus.value.get(0).getAllocationStatus(), equalTo(IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY)); } logger.info("--> enable allocation"); enableAllocation(index); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java index cf197a27faf..6d1159c82a5 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.indices.shards; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -44,9 +45,9 @@ public class IndicesShardStoreResponseTests extends ESTestCase { DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT); DiscoveryNode node2 = new DiscoveryNode("node2", DummyTransportAddress.INSTANCE, Version.CURRENT); List storeStatusList = new ArrayList<>(); - storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); - storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, 2, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, null)); - 
storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED, new IOException("corrupted"))); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, 2, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, new IOException("corrupted"))); storeStatuses.put(0, storeStatusList); storeStatuses.put(1, storeStatusList); ImmutableOpenIntMap> storesMap = storeStatuses.build(); @@ -89,8 +90,10 @@ public class IndicesShardStoreResponseTests extends ESTestCase { IndicesShardStoresResponse.StoreStatus storeStatus = storeStatusList.get(i); assertThat(storeInfo.containsKey("version"), equalTo(true)); assertThat(((int) storeInfo.get("version")), equalTo(((int) storeStatus.getVersion()))); + assertThat(storeInfo.containsKey("allocation_id"), equalTo(true)); + assertThat(((String) storeInfo.get("allocation_id")), equalTo((storeStatus.getAllocationId()))); assertThat(storeInfo.containsKey("allocation"), equalTo(true)); - assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocation().value())); + assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocationStatus().value())); assertThat(storeInfo.containsKey(storeStatus.getNode().id()), equalTo(true)); if (storeStatus.getStoreException() != null) { assertThat(storeInfo.containsKey("store_exception"), equalTo(true)); @@ -104,11 +107,11 @@ public class IndicesShardStoreResponseTests extends ESTestCase { public void testStoreStatusOrdering() throws Exception { DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, 
Version.CURRENT); List orderedStoreStatuses = new ArrayList<>(); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 2, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, new IOException("corrupted"))); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 2, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted"))); List storeStatuses = new ArrayList<>(orderedStoreStatuses); Collections.shuffle(storeStatuses, random()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java new file mode 100644 index 00000000000..dcd35303b75 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -0,0 +1,103 @@ +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.disruption.NetworkDisconnectPartition; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +@ESIntegTestCase.SuppressLocalMode +public class PrimaryAllocationIT extends ESIntegTestCase { + + public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception { + logger.info("--> starting 3 nodes, 1 master, 2 data"); + String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); + internalCluster().startDataOnlyNodesAsync(2).get(); + + assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() + .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); + ensureGreen(); + logger.info("--> indexing..."); + client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + refresh(); + + ClusterState state = 
client().admin().cluster().prepareState().all().get().getState(); + List shards = state.routingTable().allShards("test"); + assertThat(shards.size(), equalTo(2)); + + final String primaryNode; + final String replicaNode; + if (shards.get(0).primary()) { + primaryNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().name(); + replicaNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().name(); + } else { + primaryNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().name(); + replicaNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().name(); + } + + NetworkDisconnectPartition partition = new NetworkDisconnectPartition( + new HashSet<>(Arrays.asList(master, replicaNode)), Collections.singleton(primaryNode), random()); + internalCluster().setDisruptionScheme(partition); + logger.info("--> partitioning node with primary shard from rest of cluster"); + partition.startDisrupting(); + + ensureStableCluster(2, master); + + logger.info("--> index a document into previous replica shard (that is now primary)"); + client(replicaNode).prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + + logger.info("--> shut down node that has new acknowledged document"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); + + ensureStableCluster(1, master); + + partition.stopDisrupting(); + + logger.info("--> waiting for node with old primary shard to rejoin the cluster"); + ensureStableCluster(2, master); + + logger.info("--> check that old primary shard does not get promoted to primary again"); + // kick reroute and wait for all shard states to be fetched + client(master).admin().cluster().prepareReroute().get(); + assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), equalTo(0))); + // kick reroute a second time and check that all shards are unassigned + 
assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2)); + + logger.info("--> starting node that reuses data folder with the up-to-date primary shard"); + internalCluster().startDataOnlyNode(Settings.EMPTY); + + logger.info("--> check that the up-to-date primary shard gets promoted and that documents are available"); + ensureYellow("test"); + assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l); + } + + public void testNotWaitForQuorumCopies() throws Exception { + logger.info("--> starting 3 nodes"); + internalCluster().startNodesAsync(3).get(); + logger.info("--> creating index with 1 primary and 2 replicas"); + assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() + .put("index.number_of_shards", randomIntBetween(1, 3)).put("index.number_of_replicas", 2)).get()); + ensureGreen("test"); + client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + logger.info("--> removing 2 nodes from cluster"); + internalCluster().stopRandomDataNode(); + internalCluster().stopRandomDataNode(); + internalCluster().fullRestart(); + logger.info("--> checking that index still gets allocated with only 1 shard copy being available"); + ensureYellow("test"); + assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 1l); + } +} diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index 73cbb51faed..193985a1c68 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -36,7 +36,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Before; -import 
java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -59,25 +59,29 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { this.testAllocator = new TestAllocator(); } - /** - * Verifies that the canProcess method of primary allocation behaves correctly - * and processes only the applicable shard. - */ - public void testNoProcessReplica() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false, ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); - assertThat(testAllocator.needToFindPrimaryCopy(shard), equalTo(false)); - } - - public void testNoProcessPrimayNotAllcoatedBefore() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, true, ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); - assertThat(testAllocator.needToFindPrimaryCopy(shard), equalTo(false)); + public void testNoProcessPrimaryNotAllocatedBefore() { + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), randomBoolean(), Version.CURRENT); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), true, Version.V_2_1_0); + } + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().iterator().next().shardId(), equalTo(shardId)); } /** * Tests that when async fetch returns that there is no data, the shard will not be allocated. 
*/ public void testNoAsyncFetchData() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "allocId"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_0); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -85,11 +89,17 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests when the node returns that no data was found for it (-1), it will be moved to ignore unassigned. + * Tests when the node returns that no data was found for it (-1 for version and null for allocation id), + * it will be moved to ignore unassigned. */ public void testNoAllocationFound() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, -1); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "allocId"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_0); + } + testAllocator.addData(node1, -1, null); boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -97,11 +107,43 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests when the node returns that no data was found for it (-1), it will be moved to ignore unassigned. 
+ * Tests when the node returns data with a shard allocation id that does not match active allocation ids, it will be moved to ignore unassigned. + */ + public void testNoMatchingAllocationIdFound() { + RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "id2"); + testAllocator.addData(node1, 1, "id1"); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + } + + /** + * Tests that when there is a node to allocate the shard to, and there are no active allocation ids, it will be allocated to it. + * This is the case when we have old shards from pre-3.0 days. + */ + public void testNoActiveAllocationIds() { + RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_1); + testAllocator.addData(node1, 1, null); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.id())); + } + + /** + * Tests when the node returns that no data was found for it, it will be moved to ignore unassigned. 
*/ public void testStoreException() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, 3, new CorruptIndexException("test", "test")); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1", new CorruptIndexException("test", "test")); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_1); + testAllocator.addData(node1, 3, null, new CorruptIndexException("test", "test")); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -112,8 +154,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * Tests that when there is a node to allocate the shard to, it will be allocated to it. 
*/ public void testFoundAllocationAndAllocating() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, 10); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_2_0); + testAllocator.addData(node1, 3, null); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -126,8 +174,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * it will be moved to ignore unassigned until it can be allocated to. */ public void testFoundAllocationButThrottlingDecider() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders()); - testAllocator.addData(node1, 10); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, Version.V_2_2_0); + testAllocator.addData(node1, 3, null); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -139,8 +193,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * force the allocation to it. 
*/ public void testFoundAllocationButNoDecider() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders()); - testAllocator.addData(node1, 10); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, Version.V_2_0_0); + testAllocator.addData(node1, 3, null); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -149,11 +209,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests that the highest version node is chosed for allocation. + * Tests that the highest version node is chosen for allocation. */ - public void testAllocateToTheHighestVersion() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, 10).addData(node2, 12); + public void testAllocateToTheHighestVersionOnLegacyIndex() { + RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_0_0); + testAllocator.addData(node1, 10, null).addData(node2, 12, null); boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -162,35 +222,150 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests that when restoring from snapshot, even if we didn't find any node to allocate on, the shard - * will remain in the unassigned list to be allocated later. 
+ * Tests that when restoring from a snapshot and we find a node with a shard copy and allocation + * deciders say yes, we allocate to that node. */ - public void testRestoreIgnoresNoNodesToAllocate() { - MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) - .build(); - RoutingTable routingTable = RoutingTable.builder() - .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), Version.CURRENT, shardId.getIndex())) - .build(); - ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) - .metaData(metaData) - .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); - RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), state.getRoutingNodes(), state.nodes(), null, System.nanoTime()); + public void testRestore() { + RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } - testAllocator.addData(node1, -1).addData(node2, -1); + /** + * Tests that when restoring from a snapshot and we find a node with a shard copy and allocation + * deciders say throttle, we add it to ignored shards. 
+ */ + public void testRestoreThrottle() { + RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false)); + } + + /** + * Tests that when restoring from a snapshot and we find a node with a shard copy but allocation + * deciders say no, we still allocate to that node. + */ + public void testRestoreForcesAllocateIfShardAvailable() { + RoutingAllocation allocation = getRestoreRoutingAllocation(noAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "some allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } + + /** + * Tests that when restoring from a snapshot and we don't find a node with a shard copy, the shard will remain in + * the unassigned list to be allocated later. 
+ */ + public void testRestoreDoesNotAssignIfNoShardAvailable() { + RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, -1, null); boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); + } + + private RoutingAllocation getRestoreRoutingAllocation(AllocationDeciders allocationDeciders) { + Version version = randomFrom(Version.CURRENT, Version.V_2_0_0); + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version)).numberOfShards(1).numberOfReplicas(0) + .putActiveAllocationIds(0, version == Version.CURRENT ? new HashSet<>(Arrays.asList("allocId")) : Collections.emptySet())) + .build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), version, shardId.getIndex())) + .build(); + ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) + .metaData(metaData) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); + } + + /** + * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation + * deciders say yes, we allocate to that node. 
+ */ + public void testRecoverOnAnyNode() { + RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } + + /** + * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation + * deciders say throttle, we add it to ignored shards. + */ + public void testRecoverOnAnyNodeThrottle() { + RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false)); + } + + /** + * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy but allocation + * deciders say no, we still allocate to that node. 
+ */ + public void testRecoverOnAnyNodeForcesAllocateIfShardAvailable() { + RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(noAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } + + /** + * Tests that when recovering using "recover_on_any_node" and we don't find a node with a shard copy we let + * BalancedShardAllocator assign the shard + */ + public void testRecoverOnAnyNodeDoesNotAssignIfNoShardAvailable() { + RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, -1, null); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); + } + + private RoutingAllocation getRecoverOnAnyNodeRoutingAllocation(AllocationDeciders allocationDeciders) { + Version version = randomFrom(Version.CURRENT, Version.V_2_0_0); + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version) + .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) + .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true)) + .numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(0, version == Version.CURRENT ? 
new HashSet<>(Arrays.asList("allocId")) : Collections.emptySet())) + .build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), Version.CURRENT, shardId.getIndex())) + .build(); + ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) + .metaData(metaData) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); } /** * Tests that only when enough copies of the shard exists we are going to allocate it. This test * verifies that with same version (1), and quorum allocation. */ - public void testEnoughCopiesFoundForAllocation() { + public void testEnoughCopiesFoundForAllocationOnLegacyIndex() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.V_2_0_0)).numberOfShards(1).numberOfReplicas(2)) .build(); RoutingTable routingTable = RoutingTable.builder() .addAsRecovery(metaData.index(shardId.getIndex())) @@ -207,7 +382,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node1, 1); + testAllocator.addData(node1, 1, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); @@ -215,7 +390,7 @@ public class PrimaryShardAllocatorTests extends 
ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node2, 1); + testAllocator.addData(node2, 1, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); @@ -229,9 +404,9 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * Tests that only when enough copies of the shard exists we are going to allocate it. This test * verifies that even with different version, we treat different versions as a copy, and count them. */ - public void testEnoughCopiesFoundForAllocationWithDifferentVersion() { + public void testEnoughCopiesFoundForAllocationOnLegacyIndexWithDifferentVersion() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.V_2_0_0)).numberOfShards(1).numberOfReplicas(2)) .build(); RoutingTable routingTable = RoutingTable.builder() .addAsRecovery(metaData.index(shardId.getIndex())) @@ -248,7 +423,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node1, 1); + testAllocator.addData(node1, 1, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); @@ 
-256,7 +431,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node2, 2); + testAllocator.addData(node2, 2, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); @@ -266,67 +441,20 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id())); } - public void testAllocationOnAnyNodeWithSharedFs() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false, - ShardRoutingState.UNASSIGNED, 0, - new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); - - Map data = new HashMap<>(); - data.put(node1, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node1, 1)); - data.put(node2, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node2, 5)); - data.put(node3, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node3, -1)); - AsyncShardFetch.FetchResult fetches = - new AsyncShardFetch.FetchResult(shardId, data, new HashSet<>(), new HashSet<>()); - - PrimaryShardAllocator.NodesAndVersions nAndV = testAllocator.buildNodesAndVersions(shard, false, new HashSet(), fetches); - assertThat(nAndV.allocationsFound, equalTo(2)); - assertThat(nAndV.highestVersion, equalTo(5L)); - assertThat(nAndV.nodes, contains(node2)); - - nAndV = testAllocator.buildNodesAndVersions(shard, true, new HashSet(), fetches); - assertThat(nAndV.allocationsFound, equalTo(3)); - assertThat(nAndV.highestVersion, equalTo(5L)); - // All three 
nodes are potential candidates because shards can be recovered on any node - assertThat(nAndV.nodes, contains(node2, node1, node3)); - } - - public void testAllocationOnAnyNodeShouldPutNodesWithExceptionsLast() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false, - ShardRoutingState.UNASSIGNED, 0, - new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); - - Map data = new HashMap<>(); - data.put(node1, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node1, 1)); - data.put(node2, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node2, 1)); - data.put(node3, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node3, 1, new IOException("I failed to open"))); - HashSet ignoredNodes = new HashSet<>(); - ignoredNodes.add(node2.id()); - AsyncShardFetch.FetchResult fetches = - new AsyncShardFetch.FetchResult(shardId, data, new HashSet<>(), ignoredNodes); - - PrimaryShardAllocator.NodesAndVersions nAndV = testAllocator.buildNodesAndVersions(shard, false, ignoredNodes, fetches); - assertThat(nAndV.allocationsFound, equalTo(1)); - assertThat(nAndV.highestVersion, equalTo(1L)); - assertThat(nAndV.nodes, contains(node1)); - - nAndV = testAllocator.buildNodesAndVersions(shard, true, ignoredNodes, fetches); - assertThat(nAndV.allocationsFound, equalTo(2)); - assertThat(nAndV.highestVersion, equalTo(1L)); - // node3 should be last here - assertThat(nAndV.nodes.size(), equalTo(2)); - assertThat(nAndV.nodes, contains(node1, node3)); - } - - private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders) { + private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders, boolean asNew, Version version, String... 
activeAllocationIds) { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) - .build(); - RoutingTable routingTable = RoutingTable.builder() - .addAsRecovery(metaData.index(shardId.getIndex())) - .build(); + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version)) + .numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(0, new HashSet<>(Arrays.asList(activeAllocationIds)))) + .build(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + if (asNew) { + routingTableBuilder.addAsNew(metaData.index(shardId.getIndex())); + } else { + routingTableBuilder.addAsRecovery(metaData.index(shardId.getIndex())); + } ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) .metaData(metaData) - .routingTable(routingTable) + .routingTable(routingTableBuilder.build()) .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); return new RoutingAllocation(deciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); } @@ -344,15 +472,15 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { return this; } - public TestAllocator addData(DiscoveryNode node, long version) { - return addData(node, version, null); + public TestAllocator addData(DiscoveryNode node, long version, String allocationId) { + return addData(node, version, allocationId, null); } - public TestAllocator addData(DiscoveryNode node, long version, @Nullable Throwable storeException) { + public TestAllocator addData(DiscoveryNode node, long version, String allocationId, @Nullable Throwable storeException) { if (data == null) { data = new HashMap<>(); } - data.put(node, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, version, storeException)); + data.put(node, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, version, allocationId, 
storeException)); return this; } diff --git a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java index edde1720474..69c518eb9c6 100644 --- a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -20,10 +20,10 @@ package org.elasticsearch.gateway; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -32,14 +32,10 @@ import org.elasticsearch.test.InternalTestCluster.RestartCallback; import java.util.concurrent.TimeUnit; import static org.elasticsearch.client.Requests.clusterHealthRequest; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; /** * @@ -51,72 +47,12 @@ public class QuorumGatewayIT extends ESIntegTestCase { return 2; } - public void testChangeInitialShardsRecovery() throws Exception { - logger.info("--> starting 3 nodes"); - final String[] nodes = 
internalCluster().startNodesAsync(3).get().toArray(new String[0]); - - createIndex("test"); - ensureGreen(); - NumShards test = getNumShards("test"); - - logger.info("--> indexing..."); - client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); - //We don't check for failures in the flush response: if we do we might get the following: - // FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed] - flush(); - client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); - refresh(); - - for (int i = 0; i < 10; i++) { - assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l); - } - - final String nodeToRemove = nodes[between(0,2)]; - logger.info("--> restarting 1 nodes -- kill 2"); - internalCluster().fullRestart(new RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - return Settings.EMPTY; - } - - @Override - public boolean doRestart(String nodeName) { - return nodeToRemove.equals(nodeName); - } - }); - if (randomBoolean()) { - Thread.sleep(between(1, 400)); // wait a bit and give is a chance to try to allocate - } - ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForNodes("1")).actionGet(); - assertThat(clusterHealth.isTimedOut(), equalTo(false)); - assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.RED)); // nothing allocated yet - assertTrue(awaitBusy(() -> { - ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get(); - return clusterStateResponse.getState() != null && clusterStateResponse.getState().routingTable().index("test") != null; - })); // wait until we get a cluster state - could be null if we quick enough. 
- final ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get(); - assertThat(clusterStateResponse.getState(), notNullValue()); - assertThat(clusterStateResponse.getState().routingTable().index("test"), notNullValue()); - assertThat(clusterStateResponse.getState().routingTable().index("test").allPrimaryShardsActive(), is(false)); - logger.info("--> change the recovery.initial_shards setting, and make sure its recovered"); - client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("recovery.initial_shards", 1)).get(); - - logger.info("--> running cluster_health (wait for the shards to startup), primaries only since we only have 1 node"); - clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(test.numPrimaries)).actionGet(); - logger.info("--> done cluster_health, status " + clusterHealth.getStatus()); - assertThat(clusterHealth.isTimedOut(), equalTo(false)); - assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); - - for (int i = 0; i < 10; i++) { - assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l); - } - } - public void testQuorumRecovery() throws Exception { logger.info("--> starting 3 nodes"); - internalCluster().startNodesAsync(3).get(); // we are shutting down nodes - make sure we don't have 2 clusters if we test network - setMinimumMasterNodes(2); + internalCluster().startNodesAsync(3, + Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2).build()).get(); + createIndex("test"); ensureGreen(); diff --git a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java index 9a053b36527..0818999ea7e 100644 --- a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java +++ 
b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java @@ -43,9 +43,11 @@ import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Before; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; @@ -275,13 +277,16 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { } private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders, Settings settings, UnassignedInfo.Reason reason) { + ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT).put(settings)).numberOfShards(1).numberOfReplicas(0)) - .build(); + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT).put(settings)) + .numberOfShards(1).numberOfReplicas(1) + .putActiveAllocationIds(0, new HashSet<>(Arrays.asList(primaryShard.allocationId().getId())))) + .build(); RoutingTable routingTable = RoutingTable.builder() .add(IndexRoutingTable.builder(shardId.getIndex()) .addIndexShard(new IndexShardRoutingTable.Builder(shardId) - .addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10)) + .addShard(primaryShard) .addShard(ShardRouting.newUnassigned(shardId.getIndex(), shardId.getId(), null, false, new UnassignedInfo(reason, null))) .build()) ) @@ -294,13 +299,16 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { } private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) { + ShardRouting primaryShard = 
TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(1) + .putActiveAllocationIds(0, new HashSet<>(Arrays.asList(primaryShard.allocationId().getId())))) .build(); RoutingTable routingTable = RoutingTable.builder() .add(IndexRoutingTable.builder(shardId.getIndex()) .addIndexShard(new IndexShardRoutingTable.Builder(shardId) - .addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10)) + .addShard(primaryShard) .addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node2.id(), null, null, false, ShardRoutingState.INITIALIZING, 10, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null))) .build()) ) diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index b5f33afa94c..8dce7f1f954 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -133,7 +133,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ShardId id = new ShardId("foo", 1); long version = between(1, Integer.MAX_VALUE / 2); boolean primary = randomBoolean(); - AllocationId allocationId = randomAllocationId(); + AllocationId allocationId = randomBoolean() ? 
null : randomAllocationId(); ShardStateMetaData state1 = new ShardStateMetaData(version, primary, "foo", allocationId); write(state1, env.availableShardPaths(id)); ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(id)); @@ -288,7 +288,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { } public void testShardStateMetaHashCodeEquals() { - ShardStateMetaData meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), randomAllocationId()); + AllocationId allocationId = randomBoolean() ? null : randomAllocationId(); + ShardStateMetaData meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId); assertEquals(meta, new ShardStateMetaData(meta.version, meta.primary, meta.indexUUID, meta.allocationId)); assertEquals(meta.hashCode(), new ShardStateMetaData(meta.version, meta.primary, meta.indexUUID, meta.allocationId).hashCode()); @@ -299,7 +300,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertFalse(meta.equals(new ShardStateMetaData(meta.version, !meta.primary, meta.indexUUID + "foo", randomAllocationId()))); Set hashCodes = new HashSet<>(); for (int i = 0; i < 30; i++) { // just a sanity check that we impl hashcode - meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), randomAllocationId()); + allocationId = randomBoolean() ? 
null : randomAllocationId(); + meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId); hashCodes.add(meta.hashCode()); } assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1); diff --git a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java index a5cfa816455..af9cfeb94c1 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java @@ -97,7 +97,7 @@ public class SimpleIndexStateIT extends ESIntegTestCase { client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); } - public void testFastCloseAfterCreateDoesNotClose() { + public void testFastCloseAfterCreateContinuesCreateAfterOpen() { logger.info("--> creating test index that cannot be allocated"); client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder() .put("index.routing.allocation.include.tag", "no_such_node").build()).get(); @@ -106,17 +106,14 @@ public class SimpleIndexStateIT extends ESIntegTestCase { assertThat(health.isTimedOut(), equalTo(false)); assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED)); - try { - client().admin().indices().prepareClose("test").get(); - fail("Exception should have been thrown"); - } catch(IndexPrimaryShardNotAllocatedException e) { - // expected - } + client().admin().indices().prepareClose("test").get(); logger.info("--> updating test index settings to allow allocation"); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.settingsBuilder() .put("index.routing.allocation.include.tag", "").build()).get(); + client().admin().indices().prepareOpen("test").get(); + logger.info("--> waiting for green status"); ensureGreen(); diff --git 
a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index a2b73a44842..56e9d4ddb91 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -129,19 +129,6 @@ specific index module: experimental[] Disables the purge of <> on the current index. -[[index.recovery.initial_shards]]`index.recovery.initial_shards`:: -+ --- -A primary shard is only recovered only if there are enough nodes available to -allocate sufficient replicas to form a quorum. It can be set to: - - * `quorum` (default) - * `quorum-1` (or `half`) - * `full` - * `full-1`. - * Number values are also supported, e.g. `1`. --- - [float] === Settings in other index modules diff --git a/docs/reference/indices/shadow-replicas.asciidoc b/docs/reference/indices/shadow-replicas.asciidoc index da74a651242..0d589adb64a 100644 --- a/docs/reference/indices/shadow-replicas.asciidoc +++ b/docs/reference/indices/shadow-replicas.asciidoc @@ -104,9 +104,8 @@ settings API: `index.shared_filesystem.recover_on_any_node`:: Boolean value indicating whether the primary shards for the index should be - allowed to recover on any node in the cluster, regardless of the number of - replicas or whether the node has previously had the shard allocated to it - before. Defaults to `false`. + allowed to recover on any node in the cluster. If a node holding a copy of + the shard is found, recovery prefers that node. Defaults to `false`. === Node level settings related to shadow replicas diff --git a/docs/reference/indices/shard-stores.asciidoc b/docs/reference/indices/shard-stores.asciidoc index d4d385bd6dc..19acbc44d3f 100644 --- a/docs/reference/indices/shard-stores.asciidoc +++ b/docs/reference/indices/shard-stores.asciidoc @@ -52,8 +52,9 @@ The shard stores information is grouped by indices and shard ids. } }, "version": 4, <4> + "allocation_id": "2iNySv_OQVePRX-yaRH_lQ", <5> "allocation" : "primary" | "replica" | "unused", <6> - "store_exception": ... 
<5> + "store_exception": ... <7> }, ... ] @@ -66,7 +67,8 @@ The shard stores information is grouped by indices and shard ids. <3> The node information that hosts a copy of the store, the key is the unique node id. <4> The version of the store copy -<5> The status of the store copy, whether it is used as a +<5> The allocation id of the store copy +<6> The status of the store copy, whether it is used as a primary, replica or not used at all -<6> Any exception encountered while opening the shard index or +<7> Any exception encountered while opening the shard index or from earlier engine failure diff --git a/docs/reference/migration/migrate_3_0.asciidoc b/docs/reference/migration/migrate_3_0.asciidoc index 6588f22a85a..0179e289b99 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -14,6 +14,7 @@ your application to Elasticsearch 3.0. * <> * <> * <> +* <> [[breaking_30_search_changes]] === Search changes @@ -515,3 +516,23 @@ from `OsStats.Cpu#getPercent`. Only stored fields are retrievable with this option. The fields option won't be able to load non stored fields from _source anymore. +[[breaking_30_allocation]] +=== Primary shard allocation + +Previously, primary shards were only assigned if a quorum of shard copies were found (configurable using +`index.recovery.initial_shards`, now deprecated). In the case where a primary had only a single replica, quorum was defined +to be a single shard. This meant that any shard copy of an index with replication factor 1 could become primary, even if it +was a stale copy of the data on disk. This is now fixed by using allocation IDs. + +Allocation IDs assign unique identifiers to shard copies. This allows the cluster to differentiate between multiple +copies of the same data and track which shards have been active, so that after a cluster restart, shard copies +containing only the most recent data can become primaries.
+ +==== `index.shared_filesystem.recover_on_any_node` changes + +The behavior of `index.shared_filesystem.recover_on_any_node = true` has been changed. Previously, in the case where no +shard copies could be found, an arbitrary node was chosen by potentially ignoring allocation deciders. Now, we take +balancing into account but don't assign the shard if the allocation deciders are not satisfied. The behavior has also changed +in the case where shard copies can be found. Previously, a node not holding the shard copy was chosen if none of the nodes +holding shard copies were satisfying the allocation deciders. Now, the shard will be assigned to a node having a shard copy, +even if none of the nodes holding a shard copy satisfy the allocation deciders. diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc index 1daf131106d..b8073927a0f 100644 --- a/docs/reference/modules/cluster/shards_allocation.asciidoc +++ b/docs/reference/modules/cluster/shards_allocation.asciidoc @@ -22,9 +22,8 @@ Enable or disable allocation for specific kinds of shards: This setting does not affect the recovery of local primary shards when restarting a node. A restarted node that has a copy of an unassigned primary -shard will recover that primary immediately, assuming that the -<> setting is -satisfied. +shard will recover that primary immediately, assuming that its allocation id matches +one of the active allocation ids in the cluster state. 
-- diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java index e82823ae997..e6a25a3956a 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; @@ -230,7 +231,8 @@ public abstract class ESAllocationTestCase extends ESTestCase { boolean changed = false; while (unassignedIterator.hasNext()) { ShardRouting shard = unassignedIterator.next(); - if (shard.primary() || shard.allocatedPostIndexCreate() == false) { + IndexMetaData indexMetaData = allocation.metaData().index(shard.getIndex()); + if (shard.primary() || shard.allocatedPostIndexCreate(indexMetaData) == false) { continue; } changed |= replicaShardAllocator.ignoreUnassignedIfDelayed(unassignedIterator, shard); From f19056ec209538ecdd23b382c2f3f41d9b588d3a Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 17 Dec 2015 16:25:36 +0100 Subject: [PATCH 122/167] Fix documentation for running REST tests using Gradle --- TESTING.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 569c16b0747..fef23d0cd3d 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -290,14 +290,14 @@ The REST tests are run automatically when executing the "gradle check" command. 
REST tests use the following command: --------------------------------------------------------------------------- -gradle :distribution:tar:integTest \ +gradle :distribution:integ-test-zip:integTest \ -Dtests.class=org.elasticsearch.test.rest.RestIT --------------------------------------------------------------------------- A specific test case can be run with --------------------------------------------------------------------------- -gradle :distribution:tar:integTest \ +gradle :distribution:integ-test-zip:integTest \ -Dtests.class=org.elasticsearch.test.rest.RestIT \ -Dtests.method="test {p0=cat.shards/10_basic/Help}" --------------------------------------------------------------------------- From 91eed30a143bd5f40c72f5fef27ce46311d0c30e Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Thu, 17 Dec 2015 15:27:41 +0100 Subject: [PATCH 123/167] BulkProcessor backs off exponentially by default With this commit we change the default behavior of BulkProcessor from not backing off when getting EsRejectedExecutionException to backing off exponentially. 
--- .../java/org/elasticsearch/action/bulk/BulkProcessor.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 316ec7a548e..af5af80ac2f 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -78,7 +78,7 @@ public class BulkProcessor implements Closeable { private int bulkActions = 1000; private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB); private TimeValue flushInterval = null; - private BackoffPolicy backoffPolicy = BackoffPolicy.noBackoff(); + private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff(); /** * Creates a builder of bulk processor with the client to use and the listener that will be used @@ -140,7 +140,9 @@ public class BulkProcessor implements Closeable { * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally * in case they have failed due to resource constraints (i.e. a thread pool was full). * - * The default is to not back off, i.e. failing immediately. + * The default is to back off exponentially. 
+ * + * @see org.elasticsearch.action.bulk.BackoffPolicy#exponentialBackoff() */ public Builder setBackoffPolicy(BackoffPolicy backoffPolicy) { if (backoffPolicy == null) { @@ -162,7 +164,7 @@ public class BulkProcessor implements Closeable { if (client == null) { throw new NullPointerException("The client you specified while building a BulkProcessor is null"); } - + return new Builder(client, listener); } From 9dfc9397d464d1334a9c4569d904908fcbcab150 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Thu, 17 Dec 2015 16:59:05 +0100 Subject: [PATCH 124/167] Temporarily deactivate spuriously failing test --- core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 5728f7b54bc..636cea5c8aa 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -107,6 +107,7 @@ public class RetryTests extends ESTestCase { listener.assertOnFailureNeverCalled(); } + @AwaitsFix(bugUrl = "spuriously fails on Jenkins. Investigation ongoing.") public void testAsyncRetryFailsAfterBacksOff() throws Exception { BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1); AssertingListener listener = new AssertingListener(); From 517fc113fe4e4c96e9a96d16f8a5b8800c2f727b Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 17 Dec 2015 17:01:29 +0100 Subject: [PATCH 125/167] Fix spans extraction to not also include individual terms. This is a bug that I introduced in #13239 while thinking that the differences were due to changes in Lucene: extractUnknownQuery is also called when span extraction already succeeded, so we should only fall back to Weight.extractTerms if no spans have been extracted yet. 
Close #15291 --- .../search/highlight/CustomQueryScorer.java | 2 +- .../search/highlight/HighlighterSearchIT.java | 6 +-- .../highlight/PlainHighlighterTests.java | 42 +++++++++++++++++++ 3 files changed, 46 insertions(+), 4 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/search/highlight/PlainHighlighterTests.java diff --git a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java index 9e86edef47d..8ad24b5cb19 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java @@ -82,7 +82,7 @@ public final class CustomQueryScorer extends QueryScorer { } else if (query instanceof FiltersFunctionScoreQuery) { query = ((FiltersFunctionScoreQuery) query).getSubQuery(); extract(query, query.getBoost(), terms); - } else { + } else if (terms.isEmpty()) { extractWeightedTerms(terms, query, query.getBoost()); } } diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java index 63378baa721..4063ec81a28 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java @@ -1557,7 +1557,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { .fragmenter("simple"))).get(); assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end")); + assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end")); response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", 
"long tag").type(MatchQuery.Type.PHRASE)) @@ -1566,7 +1566,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { .fragmenter("span"))).get(); assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end")); + assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end")); assertFailures(client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQuery.Type.PHRASE)) @@ -2062,7 +2062,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy quick dog")); + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy quick dog")); } public void testPostingsHighlighterMultipleFields() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/highlight/PlainHighlighterTests.java b/core/src/test/java/org/elasticsearch/search/highlight/PlainHighlighterTests.java new file mode 100644 index 00000000000..5156209d6f1 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/highlight/PlainHighlighterTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.highlight; + +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.highlight.QueryScorer; +import org.apache.lucene.util.LuceneTestCase; + +public class PlainHighlighterTests extends LuceneTestCase { + + public void testHighlightPhrase() throws Exception { + Query query = new PhraseQuery.Builder() + .add(new Term("field", "foo")) + .add(new Term("field", "bar")) + .build(); + QueryScorer queryScorer = new CustomQueryScorer(query); + org.apache.lucene.search.highlight.Highlighter highlighter = new org.apache.lucene.search.highlight.Highlighter(queryScorer); + String[] frags = highlighter.getBestFragments(new MockAnalyzer(random()), "field", "bar foo bar foo", 10); + assertArrayEquals(new String[] {"bar foo bar foo"}, frags); + } + +} From 6ea16671f43617536ebb0d03468fe74bbc7bab37 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 17 Dec 2015 14:54:57 +0100 Subject: [PATCH 126/167] Simplify the Text API. We have the Text API, which is essentially a wrapper around a String and a BytesReference and then we have 3 implementations depending on whether the String view should be cached, the BytesReference view should be cached, or both should be cached. This commit merges everything into a single Text that is essentially the old StringAndBytesText impl. 
Long term we should look into whether this API has any performance benefit or if we could just use plain strings. This would greatly simplify all our other APIs that currently use Text. --- .../TransportShardMultiPercolateAction.java | 3 - .../service/InternalClusterService.java | 4 +- .../common/io/stream/StreamInput.java | 6 +- .../elasticsearch/common/text/BytesText.java | 82 ------------- .../common/text/StringAndBytesText.java | 111 ------------------ .../elasticsearch/common/text/StringText.java | 94 --------------- .../org/elasticsearch/common/text/Text.java | 96 ++++++++++++--- .../index/mapper/DocumentMapper.java | 6 +- .../percolator/PercolateContext.java | 4 +- .../percolator/PercolatorService.java | 14 +-- .../search/SearchShardTarget.java | 6 +- .../bucket/histogram/InternalHistogram.java | 3 +- .../search/fetch/FetchPhase.java | 4 +- .../highlight/FastVectorHighlighter.java | 6 +- .../search/highlight/HighlightField.java | 3 +- .../search/highlight/PlainHighlighter.java | 6 +- .../search/highlight/PostingsHighlighter.java | 4 +- .../search/internal/InternalSearchHit.java | 10 +- .../completion/CompletionSuggester.java | 6 +- .../suggest/phrase/PhraseSuggester.java | 7 +- .../search/suggest/term/TermSuggester.java | 6 +- .../search/highlight/CustomHighlighter.java | 5 +- .../internal/InternalSearchHitTests.java | 14 +-- .../search/suggest/CustomSuggester.java | 6 +- .../messy/tests/SimpleSortTests.java | 10 +- .../TransportDeleteByQueryActionTests.java | 4 +- 26 files changed, 140 insertions(+), 380 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/common/text/BytesText.java delete mode 100644 core/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java delete mode 100644 core/src/main/java/org/elasticsearch/common/text/StringText.java diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java 
b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java index 68bcdc1503d..1d29e6c3971 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.percolate; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.IndicesRequest; @@ -37,8 +36,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringText; -import org.elasticsearch.common.text.Text; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.threadpool.ThreadPool; diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index b8c898a31e9..020e8d8da2d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -50,7 +50,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; 
@@ -357,7 +357,7 @@ public class InternalClusterService extends AbstractLifecycleComponent { +public final class Text implements Comparable { + + public static final Text[] EMPTY_ARRAY = new Text[0]; + + public static Text[] convertFromStringArray(String[] strings) { + if (strings.length == 0) { + return EMPTY_ARRAY; + } + Text[] texts = new Text[strings.length]; + for (int i = 0; i < strings.length; i++) { + texts[i] = new Text(strings[i]); + } + return texts; + } + + private BytesReference bytes; + private String text; + private int hash; + + public Text(BytesReference bytes) { + this.bytes = bytes; + } + + public Text(String text) { + this.text = text; + } /** - * Are bytes available without the need to be converted into bytes when calling {@link #bytes()}. + * Whether a {@link BytesReference} view of the data is already materialized. */ - boolean hasBytes(); + public boolean hasBytes() { + return bytes != null; + } /** - * The UTF8 bytes representing the the text, might be converted on the fly, see {@link #hasBytes()} + * Returns a {@link BytesReference} view of the data. */ - BytesReference bytes(); + public BytesReference bytes() { + if (bytes == null) { + bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8)); + } + return bytes; + } /** - * Is there a {@link String} representation of the text. If not, then it {@link #hasBytes()}. + * Whether a {@link String} view of the data is already materialized. */ - boolean hasString(); + public boolean hasString() { + return text != null; + } /** - * Returns the string representation of the text, might be converted to a string on the fly. + * Returns a {@link String} view of the data. 
*/ - String string(); + public String string() { + if (text == null) { + if (!bytes.hasArray()) { + bytes = bytes.toBytesArray(); + } + text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8); + } + return text; + } - /** - * Returns the string representation of the text, might be converted to a string on the fly. - */ @Override - String toString(); + public String toString() { + return string(); + } + + @Override + public int hashCode() { + if (hash == 0) { + hash = bytes().hashCode(); + } + return hash; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + return bytes().equals(((Text) obj).bytes()); + } + + @Override + public int compareTo(Text text) { + return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes()); + } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 24374806717..333cda459f7 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.xcontent.ToXContent; @@ -114,7 +114,7 @@ public class DocumentMapper implements ToXContent { private final MapperService mapperService; private final String type; - private final StringAndBytesText typeText; + private final Text typeText; private volatile CompressedXContent mappingSource; @@ -138,7 +138,7 @@ public class 
DocumentMapper implements ToXContent { ReentrantReadWriteLock mappingLock) { this.mapperService = mapperService; this.type = rootObjectMapper.name(); - this.typeText = new StringAndBytesText(this.type); + this.typeText = new Text(this.type); this.mapping = new Mapping( Version.indexCreated(indexSettings), rootObjectMapper, diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java index 70abaaaff3d..5b09b55f8dc 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java @@ -38,7 +38,7 @@ import org.elasticsearch.common.HasHeaders; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.AnalysisService; @@ -164,7 +164,7 @@ public class PercolateContext extends SearchContext { fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList())); } hitContext().reset( - new InternalSearchHit(0, "unknown", new StringText(parsedDocument.type()), fields), + new InternalSearchHit(0, "unknown", new Text(parsedDocument.type()), fields), atomicReaderContext, 0, docSearcher.searcher() ); } diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java index fa7b47766a8..eb33f3832b4 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -52,8 +52,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import 
org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.BytesText; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -533,10 +531,10 @@ public class PercolatorService extends AbstractComponent { List finalMatches = new ArrayList<>(requestedSize == 0 ? numMatches : requestedSize); outer: for (PercolateShardResponse response : shardResults) { - Text index = new StringText(response.getIndex()); + Text index = new Text(response.getIndex()); for (int i = 0; i < response.matches().length; i++) { float score = response.scores().length == 0 ? NO_SCORE : response.scores()[i]; - Text match = new BytesText(new BytesArray(response.matches()[i])); + Text match = new Text(new BytesArray(response.matches()[i])); Map hl = response.hls().isEmpty() ? null : response.hls().get(i); finalMatches.add(new PercolateResponse.Match(index, match, score, hl)); if (requestedSize != 0 && finalMatches.size() == requestedSize) { @@ -686,10 +684,10 @@ public class PercolatorService extends AbstractComponent { List finalMatches = new ArrayList<>(requestedSize); if (nonEmptyResponses == 1) { PercolateShardResponse response = shardResults.get(firstNonEmptyIndex); - Text index = new StringText(response.getIndex()); + Text index = new Text(response.getIndex()); for (int i = 0; i < response.matches().length; i++) { float score = response.scores().length == 0 ? 
Float.NaN : response.scores()[i]; - Text match = new BytesText(new BytesArray(response.matches()[i])); + Text match = new Text(new BytesArray(response.matches()[i])); if (!response.hls().isEmpty()) { Map hl = response.hls().get(i); finalMatches.add(new PercolateResponse.Match(index, match, score, hl)); @@ -728,8 +726,8 @@ public class PercolatorService extends AbstractComponent { slots[requestIndex]++; PercolateShardResponse shardResponse = shardResults.get(requestIndex); - Text index = new StringText(shardResponse.getIndex()); - Text match = new BytesText(new BytesArray(shardResponse.matches()[itemIndex])); + Text index = new Text(shardResponse.getIndex()); + Text match = new Text(new BytesArray(shardResponse.matches()[itemIndex])); float score = shardResponse.scores()[itemIndex]; if (!shardResponse.hls().isEmpty()) { Map hl = shardResponse.hls().get(itemIndex); diff --git a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java index 1a12751d396..c648436c3a9 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import java.io.IOException; @@ -42,8 +42,8 @@ public class SearchShardTarget implements Streamable, Comparable extends Inter @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (formatter != ValueFormatter.RAW) { - Text keyTxt = new StringText(formatter.format(key)); + Text keyTxt = new Text(formatter.format(key)); if (keyed) { 
builder.startObject(keyTxt.string()); } else { diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 227141e4ddf..04890700be8 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -198,7 +198,7 @@ public class FetchPhase implements SearchPhase { DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type()); Text typeText; if (documentMapper == null) { - typeText = new StringAndBytesText(fieldsVisitor.uid().type()); + typeText = new Text(fieldsVisitor.uid().type()); } else { typeText = documentMapper.typeText(); } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java index 65702dd24b5..b57899b2e17 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java @@ -33,7 +33,7 @@ import org.apache.lucene.search.vectorhighlight.SimpleFieldFragList; import org.apache.lucene.search.vectorhighlight.SimpleFragListBuilder; import org.apache.lucene.search.vectorhighlight.SingleFragListBuilder; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import 
org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.search.fetch.FetchPhaseExecutionException; import org.elasticsearch.search.fetch.FetchSubPhase; @@ -159,7 +159,7 @@ public class FastVectorHighlighter implements Highlighter { } if (fragments != null && fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize(); @@ -170,7 +170,7 @@ public class FastVectorHighlighter implements Highlighter { fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(), mapper.fieldType().names().indexName(), fieldFragList, 1, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder); if (fragments != null && fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java index 9077278d515..30530b697f3 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.highlight; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import java.io.IOException; @@ -90,7 +89,7 @@ public class HighlightField implements Streamable { if (in.readBoolean()) { int size = in.readVInt(); if (size == 0) { - fragments = 
StringText.EMPTY_ARRAY; + fragments = Text.EMPTY_ARRAY; } else { fragments = new Text[size]; for (int i = 0; i < size; i++) { diff --git a/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java index 041ed754d76..5f4cdddb060 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java @@ -33,9 +33,7 @@ import org.apache.lucene.search.highlight.SimpleSpanFragmenter; import org.apache.lucene.search.highlight.TextFragment; import org.apache.lucene.util.BytesRefHash; import org.apache.lucene.util.CollectionUtil; -import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.search.fetch.FetchPhaseExecutionException; @@ -158,7 +156,7 @@ public class PlainHighlighter implements Highlighter { } if (fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize(); @@ -172,7 +170,7 @@ public class PlainHighlighter implements Highlighter { throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e); } if (end > 0) { - return new HighlightField(highlighterContext.fieldName, new Text[] { new StringText(fieldContents.substring(0, end)) }); + return new HighlightField(highlighterContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) }); } } return null; diff --git a/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java 
b/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java index e11840e89e7..2509f95da59 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java @@ -28,7 +28,7 @@ import org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator; import org.apache.lucene.search.postingshighlight.Snippet; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.search.fetch.FetchPhaseExecutionException; import org.elasticsearch.search.fetch.FetchSubPhase; @@ -122,7 +122,7 @@ public class PostingsHighlighter implements Highlighter { } if (fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } return null; diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java index 96fd103fa6f..fcac5b1cc8b 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ 
-104,14 +104,14 @@ public class InternalSearchHit implements SearchHit { public InternalSearchHit(int docId, String id, Text type, Map fields) { this.docId = docId; - this.id = new StringAndBytesText(id); + this.id = new Text(id); this.type = type; this.fields = fields; } public InternalSearchHit(int nestedTopDocId, String id, Text type, InternalNestedIdentity nestedIdentity, Map fields) { this.docId = nestedTopDocId; - this.id = new StringAndBytesText(id); + this.id = new Text(id); this.type = type; this.nestedIdentity = nestedIdentity; this.fields = fields; @@ -339,7 +339,7 @@ public class InternalSearchHit implements SearchHit { if (sortValues != null) { for (int i = 0; i < sortValues.length; i++) { if (sortValues[i] instanceof BytesRef) { - sortValuesCopy[i] = new StringAndBytesText(new BytesArray((BytesRef) sortValues[i])); + sortValuesCopy[i] = new Text(new BytesArray((BytesRef) sortValues[i])); } } } @@ -783,7 +783,7 @@ public class InternalSearchHit implements SearchHit { private InternalNestedIdentity child; public InternalNestedIdentity(String field, int offset, InternalNestedIdentity child) { - this.field = new StringAndBytesText(field); + this.field = new Text(field); this.offset = offset; this.child = child; } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java index 106672ae7ae..6a0155ffb7a 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java @@ -30,7 +30,7 @@ import org.apache.lucene.search.suggest.document.TopSuggestDocs; import org.apache.lucene.search.suggest.document.TopSuggestDocsCollector; import org.apache.lucene.util.*; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import 
org.elasticsearch.index.fielddata.AtomicFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.MappedFieldType; @@ -57,7 +57,7 @@ public class CompletionSuggester extends Suggester } CompletionSuggestion completionSuggestion = new CompletionSuggestion(name, suggestionContext.getSize()); spare.copyUTF8Bytes(suggestionContext.getText()); - CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new StringText(spare.toString()), 0, spare.length()); + CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new Text(spare.toString()), 0, spare.length()); completionSuggestion.addTerm(completionSuggestEntry); TopSuggestDocsCollector collector = new TopDocumentsCollector(suggestionContext.getSize()); suggest(searcher, suggestionContext.toQuery(), collector); @@ -91,7 +91,7 @@ public class CompletionSuggester extends Suggester } if (numResult++ < suggestionContext.getSize()) { CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option( - new StringText(suggestDoc.key.toString()), suggestDoc.score, contexts, payload); + new Text(suggestDoc.key.toString()), suggestDoc.score, contexts, payload); completionSuggestEntry.addOption(option); } else { break; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java index fccf9ebc30e..c7fa6fae302 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java @@ -30,7 +30,6 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import 
org.elasticsearch.index.IndexService; import org.elasticsearch.index.query.ParsedQuery; @@ -127,11 +126,11 @@ public final class PhraseSuggester extends Suggester { if (!collateMatch && !collatePrune) { continue; } - Text phrase = new StringText(spare.toString()); + Text phrase = new Text(spare.toString()); Text highlighted = null; if (suggestion.getPreTag() != null) { spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, suggestion.getPreTag(), suggestion.getPostTag())); - highlighted = new StringText(spare.toString()); + highlighted = new Text(spare.toString()); } if (collatePrune) { resultEntry.addOption(new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score), collateMatch)); @@ -147,7 +146,7 @@ public final class PhraseSuggester extends Suggester { private PhraseSuggestion.Entry buildResultEntry(PhraseSuggestionContext suggestion, CharsRefBuilder spare, double cutoffScore) { spare.copyUTF8Bytes(suggestion.getText()); - return new PhraseSuggestion.Entry(new StringText(spare.toString()), 0, spare.length(), cutoffScore); + return new PhraseSuggestion.Entry(new Text(spare.toString()), 0, spare.length(), cutoffScore); } ScriptService scriptService() { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java index 4c1b176c990..34cd3ad4d56 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java @@ -27,8 +27,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.text.BytesText; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.search.suggest.SuggestContextParser; import 
org.elasticsearch.search.suggest.SuggestUtils; @@ -54,10 +52,10 @@ public final class TermSuggester extends Suggester { SuggestWord[] suggestedWords = directSpellChecker.suggestSimilar( token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode() ); - Text key = new BytesText(new BytesArray(token.term.bytes())); + Text key = new Text(new BytesArray(token.term.bytes())); TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset); for (SuggestWord suggestWord : suggestedWords) { - Text word = new StringText(suggestWord.string); + Text word = new Text(suggestWord.string); resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score)); } response.addTerm(resultEntry); diff --git a/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java index 5a8d7c0150a..05b999a9196 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.highlight; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.FieldMapper; @@ -52,12 +51,12 @@ public class CustomHighlighter implements Highlighter { } List responses = new ArrayList<>(); - responses.add(new StringText(String.format(Locale.ENGLISH, "standard response for %s at position %s", field.field(), + responses.add(new Text(String.format(Locale.ENGLISH, "standard response for %s at position %s", field.field(), cacheEntry.position))); if (field.fieldOptions().options() != null) { for (Map.Entry entry : field.fieldOptions().options().entrySet()) { - responses.add(new StringText("field:" + entry.getKey() + ":" + entry.getValue())); + responses.add(new 
Text("field:" + entry.getKey() + ":" + entry.getValue())); } } diff --git a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java index cc631d5df2a..0525fd28db1 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.internal; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; @@ -39,25 +39,25 @@ public class InternalSearchHitTests extends ESTestCase { SearchShardTarget target = new SearchShardTarget("_node_id", "_index", 0); Map innerHits = new HashMap<>(); - InternalSearchHit innerHit1 = new InternalSearchHit(0, "_id", new StringText("_type"), null); + InternalSearchHit innerHit1 = new InternalSearchHit(0, "_id", new Text("_type"), null); innerHit1.shardTarget(target); - InternalSearchHit innerInnerHit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null); + InternalSearchHit innerInnerHit2 = new InternalSearchHit(0, "_id", new Text("_type"), null); innerInnerHit2.shardTarget(target); innerHits.put("1", new InternalSearchHits(new InternalSearchHit[]{innerInnerHit2}, 1, 1f)); innerHit1.setInnerHits(innerHits); - InternalSearchHit innerHit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null); + InternalSearchHit innerHit2 = new InternalSearchHit(0, "_id", new Text("_type"), null); innerHit2.shardTarget(target); - InternalSearchHit innerHit3 = new InternalSearchHit(0, "_id", new StringText("_type"), null); + InternalSearchHit innerHit3 = new InternalSearchHit(0, "_id", new Text("_type"), null); 
innerHit3.shardTarget(target); innerHits = new HashMap<>(); - InternalSearchHit hit1 = new InternalSearchHit(0, "_id", new StringText("_type"), null); + InternalSearchHit hit1 = new InternalSearchHit(0, "_id", new Text("_type"), null); innerHits.put("1", new InternalSearchHits(new InternalSearchHit[]{innerHit1, innerHit2}, 1, 1f)); innerHits.put("2", new InternalSearchHits(new InternalSearchHit[]{innerHit3}, 1, 1f)); hit1.shardTarget(target); hit1.setInnerHits(innerHits); - InternalSearchHit hit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null); + InternalSearchHit hit2 = new InternalSearchHit(0, "_id", new Text("_type"), null); hit2.shardTarget(target); InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[]{hit1, hit2}, 2, 1f); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java index 281cf6ae18e..419316b5265 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.CharsRefBuilder; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import java.io.IOException; import java.util.Locale; @@ -42,11 +42,11 @@ public class CustomSuggester extends Suggester> response = new Suggest.Suggestion<>(name, suggestion.getSize()); String firstSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "12"); - Suggest.Suggestion.Entry resultEntry12 = new Suggest.Suggestion.Entry<>(new StringText(firstSuggestion), 0, text.length() + 2); + Suggest.Suggestion.Entry resultEntry12 = new Suggest.Suggestion.Entry<>(new Text(firstSuggestion), 0, text.length() + 2); response.addTerm(resultEntry12); String 
secondSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "123"); - Suggest.Suggestion.Entry resultEntry123 = new Suggest.Suggestion.Entry<>(new StringText(secondSuggestion), 0, text.length() + 3); + Suggest.Suggestion.Entry resultEntry123 = new Suggest.Suggestion.Entry<>(new Text(secondSuggestion), 0, text.length() + 3); response.addTerm(resultEntry123); return response; diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java index db8a13c5ab8..de9b5b5f4f5 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java @@ -34,7 +34,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -1633,8 +1633,7 @@ public class SimpleSortTests extends ESIntegTestCase { assertThat(hits[i].getSortValues().length, is(1)); Object o = hits[i].getSortValues()[0]; assertThat(o, notNullValue()); - assertThat(o instanceof StringAndBytesText, is(true)); - StringAndBytesText text = (StringAndBytesText) o; + Text text = (Text) o; assertThat(text.string(), is("bar")); } @@ -1650,8 +1649,7 @@ public class SimpleSortTests extends ESIntegTestCase { assertThat(hits[i].getSortValues().length, is(1)); Object o = hits[i].getSortValues()[0]; assertThat(o, notNullValue()); - assertThat(o instanceof StringAndBytesText, is(true)); - StringAndBytesText text = (StringAndBytesText) o; 
+ Text text = (Text) o; assertThat(text.string(), is("bar bar")); } } @@ -1925,7 +1923,7 @@ public class SimpleSortTests extends ESIntegTestCase { .addSort(fieldSort("str_field2").order(SortOrder.DESC).unmappedType("string")).get(); assertSortValues(resp, - new Object[] {new StringAndBytesText("bcd"), null}, + new Object[] {new Text("bcd"), null}, new Object[] {null, null}); resp = client().prepareSearch("test1", "test2") diff --git a/plugins/delete-by-query/src/test/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryActionTests.java b/plugins/delete-by-query/src/test/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryActionTests.java index c44608c4e4b..57bfa4c2328 100644 --- a/plugins/delete-by-query/src/test/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryActionTests.java +++ b/plugins/delete-by-query/src/test/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryActionTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.CountDown; @@ -339,7 +339,7 @@ public class TransportDeleteByQueryActionTests extends ESSingleNodeTestCase { final int nbDocs = randomIntBetween(0, 20); SearchHit[] docs = new SearchHit[nbDocs]; for (int i = 0; i < nbDocs; i++) { - InternalSearchHit doc = new InternalSearchHit(randomInt(), String.valueOf(i), new StringText("type"), null); + InternalSearchHit doc = new InternalSearchHit(randomInt(), String.valueOf(i), new Text("type"), null); doc.shard(new SearchShardTarget("node", "test", randomInt())); docs[i] = doc; } From 87f800cdca235de255ffaefb7777888da59dc420 
Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 17 Dec 2015 17:26:11 +0100 Subject: [PATCH 127/167] make it updateable --- core/src/main/java/org/elasticsearch/transport/Transport.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java index 8e4b7a47b1b..16270234494 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transport.java +++ b/core/src/main/java/org/elasticsearch/transport/Transport.java @@ -36,7 +36,7 @@ import java.util.Map; public interface Transport extends LifecycleComponent { - Setting TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", false, Setting.Scope.CLUSTER); + Setting TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER); public static class TransportSettings { public static final String TRANSPORT_TCP_COMPRESS = "transport.tcp.compress"; From 51fc09b2642afae23ef76569e022e5ade2b99e1d Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Thu, 17 Dec 2015 17:57:55 +0100 Subject: [PATCH 128/167] Temporarily deactivate spuriously failing test --- core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 636cea5c8aa..5b9ab898b0e 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -91,6 +91,7 @@ public class RetryTests extends ESTestCase { assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions())); } + @AwaitsFix(bugUrl = "spuriously fails on Jenkins. 
Investigation ongoing.") public void testAsyncRetryBacksOff() throws Exception { BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL); AssertingListener listener = new AssertingListener(); From bf7a89e304b5b2afc38aeccb2835ab6850084dd1 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 17 Dec 2015 12:18:39 -0500 Subject: [PATCH 129/167] Remove optionality from streaming useCompressedOops field --- core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index 22f0ed912d0..82a99bd0bd1 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -375,7 +375,7 @@ public class JvmInfo implements Streamable, ToXContent { mem.readFrom(in); gcCollectors = in.readStringArray(); memoryPools = in.readStringArray(); - useCompressedOops = in.readOptionalString(); + useCompressedOops = in.readString(); } @Override @@ -400,7 +400,7 @@ public class JvmInfo implements Streamable, ToXContent { mem.writeTo(out); out.writeStringArray(gcCollectors); out.writeStringArray(memoryPools); - out.writeOptionalString(useCompressedOops); + out.writeString(useCompressedOops); } public static class Mem implements Streamable { From 315b0c263d1988905a5b989d79abb01515e1fc5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 17 Nov 2015 23:46:07 +0100 Subject: [PATCH 130/167] Geo: Make rest of ShapeBuilders implement Writable This is the second part of making ShapeBuilders implement Writable. This PR add serialization, equality and hashCode to all remaining ShapeBuilders that don't implement it yet. 
--- .../common/geo/builders/CircleBuilder.java | 2 - .../common/geo/builders/EnvelopeBuilder.java | 1 - .../geo/builders/MultiPointBuilder.java | 43 +++++++++++++- .../common/geo/builders/PointBuilder.java | 2 - .../AbstractShapeBuilderTestCase.java | 7 ++- .../geo/builders/MultiPointBuilderTests.java | 56 +++++++++++++++++++ 6 files changed, 100 insertions(+), 11 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java index 5f11d12a4bf..cdfc89a7f94 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java @@ -36,8 +36,6 @@ public class CircleBuilder extends ShapeBuilder { public static final String FIELD_RADIUS = "radius"; public static final GeoShapeType TYPE = GeoShapeType.CIRCLE; - public static final CircleBuilder PROTOTYPE = new CircleBuilder(); - private DistanceUnit unit; private double radius; private Coordinate center; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 62f29d2bad7..770bc4ffdef 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -33,7 +33,6 @@ import java.util.Objects; public class EnvelopeBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE; - public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(); protected Coordinate topLeft; protected Coordinate bottomRight; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java 
b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index 8d5cfabdabb..517cfa05048 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -22,16 +22,18 @@ package org.elasticsearch.common.geo.builders; import com.spatial4j.core.shape.Point; import com.spatial4j.core.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; -import org.elasticsearch.common.geo.XShapeCollection; -import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.geo.XShapeCollection; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; public class MultiPointBuilder extends PointCollection { - public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT; @Override @@ -61,4 +63,39 @@ public class MultiPointBuilder extends PointCollection { public GeoShapeType type() { return TYPE; } + + @Override + public int hashCode() { + return Objects.hash(points); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + MultiPointBuilder other = (MultiPointBuilder) obj; + return Objects.equals(points, other.points); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(points.size()); + for (Coordinate point : points) { + writeCoordinateTo(point, out); + } + } + + @Override + public ShapeBuilder readFrom(StreamInput in) throws IOException { + MultiPointBuilder multiPointBuilder = new MultiPointBuilder(); + int size = in.readVInt(); + for (int i=0; i < size; i++) { + multiPointBuilder.point(readCoordinateFrom(in)); + } + return 
multiPointBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index d6d62c28b8c..e7070333666 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -33,8 +33,6 @@ public class PointBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POINT; - public static final PointBuilder PROTOTYPE = new PointBuilder(); - private Coordinate coordinate; public PointBuilder coordinate(Coordinate coordinate) { diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java index f15a731e86e..ce1663fad79 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -44,9 +44,10 @@ public abstract class AbstractShapeBuilderTestCase exte public static void init() { if (namedWriteableRegistry == null) { namedWriteableRegistry = new NamedWriteableRegistry(); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, new PointBuilder()); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, new CircleBuilder()); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, new EnvelopeBuilder()); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, new MultiPointBuilder()); } } diff --git 
a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java new file mode 100644 index 00000000000..49c256a4362 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.geo.builders; + +import com.vividsolutions.jts.geom.Coordinate; + +import org.elasticsearch.test.geo.RandomShapeGenerator; +import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; + +import java.io.IOException; + +public class MultiPointBuilderTests extends AbstractShapeBuilderTestCase { + + @Override + protected MultiPointBuilder createTestShapeBuilder() { + return (MultiPointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.MULTIPOINT); + } + + @Override + protected MultiPointBuilder mutate(MultiPointBuilder original) throws IOException { + MultiPointBuilder mutation = copyShape(original); + Coordinate[] coordinates = original.coordinates(false); + Coordinate coordinate = randomFrom(coordinates); + if (randomBoolean()) { + if (coordinate.x != 0.0) { + coordinate.x = coordinate.x / 2; + } else { + coordinate.x = randomDoubleBetween(-180.0, 180.0, true); + } + } else { + if (coordinate.y != 0.0) { + coordinate.y = coordinate.y / 2; + } else { + coordinate.y = randomDoubleBetween(-90.0, 90.0, true); + } + } + return mutation.points(coordinates); + } +} From 31f90c91af6e084391c392d0eef81118015861ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 19 Nov 2015 13:48:50 +0100 Subject: [PATCH 131/167] Making LineStringBuilder and MultiLineStringBuilder writable and add equals/hashCode --- .../common/geo/builders/CircleBuilder.java | 2 + .../common/geo/builders/EnvelopeBuilder.java | 2 + .../geo/builders/LineStringBuilder.java | 44 +++++++++++++ .../geo/builders/MultiLineStringBuilder.java | 44 +++++++++++++ .../geo/builders/MultiPointBuilder.java | 4 +- .../common/geo/builders/PointBuilder.java | 1 + .../AbstractShapeBuilderTestCase.java | 16 ++--- .../geo/builders/LineStringBuilderTests.java | 57 +++++++++++++++++ .../builders/MultiLineStringBuilderTests.java | 62 +++++++++++++++++++ 9 files changed, 224 insertions(+), 8 deletions(-) create mode 100644 
core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java create mode 100644 core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java index cdfc89a7f94..ddafecc4e7f 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java @@ -36,6 +36,8 @@ public class CircleBuilder extends ShapeBuilder { public static final String FIELD_RADIUS = "radius"; public static final GeoShapeType TYPE = GeoShapeType.CIRCLE; + static final CircleBuilder PROTOTYPE = new CircleBuilder(); + private DistanceUnit unit; private double radius; private Coordinate center; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 770bc4ffdef..62eea070032 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -34,6 +34,8 @@ public class EnvelopeBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE; + static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(); + protected Coordinate topLeft; protected Coordinate bottomRight; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index c7ba9b72f55..71d9bee8686 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -22,8 +22,12 @@ package org.elasticsearch.common.geo.builders; import 
java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Objects; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; + import com.spatial4j.core.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; @@ -34,6 +38,8 @@ public class LineStringBuilder extends PointCollection { public static final GeoShapeType TYPE = GeoShapeType.LINESTRING; + static final LineStringBuilder PROTOTYPE = new LineStringBuilder(); + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -139,4 +145,42 @@ public class LineStringBuilder extends PointCollection { } return coordinates; } + + @Override + public int hashCode() { + return Objects.hash(points, translated); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + LineStringBuilder other = (LineStringBuilder) obj; + return Objects.equals(points, other.points) && + (translated == other.translated); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(points.size()); + for (Coordinate point : points) { + writeCoordinateTo(point, out); + } + out.writeBoolean(translated); + } + + @Override + public LineStringBuilder readFrom(StreamInput in) throws IOException { + LineStringBuilder lineStringBuilder = new LineStringBuilder(); + int size = in.readVInt(); + for (int i=0; i < size; i++) { + lineStringBuilder.point(readCoordinateFrom(in)); + } + lineStringBuilder.translated = in.readBoolean(); + return lineStringBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java 
index a004b90a2dc..16b36d81037 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.geo.builders; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import com.spatial4j.core.shape.Shape; @@ -29,11 +31,14 @@ import com.vividsolutions.jts.geom.LineString; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; +import java.util.Objects; public class MultiLineStringBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING; + static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder(); + private final ArrayList lines = new ArrayList<>(); public MultiLineStringBuilder linestring(LineStringBuilder line) { @@ -41,6 +46,10 @@ public class MultiLineStringBuilder extends ShapeBuilder { return this; } + public MultiLineStringBuilder linestring(Coordinate[] coordinates) { + return this.linestring(new LineStringBuilder().points(coordinates)); + } + public Coordinate[][] coordinates() { Coordinate[][] result = new Coordinate[lines.size()][]; for (int i = 0; i < result.length; i++) { @@ -92,4 +101,39 @@ public class MultiLineStringBuilder extends ShapeBuilder { } return jtsGeometry(geometry); } + + @Override + public int hashCode() { + return Objects.hash(lines); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + MultiLineStringBuilder other = (MultiLineStringBuilder) obj; + return Objects.equals(lines, other.lines); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(lines.size()); + for (LineStringBuilder line : lines) { + 
line.writeTo(out); + } + } + + @Override + public MultiLineStringBuilder readFrom(StreamInput in) throws IOException { + MultiLineStringBuilder multiLineStringBuilder = new MultiLineStringBuilder(); + int size = in.readVInt(); + for (int i=0; i < size; i++) { + multiLineStringBuilder.linestring(LineStringBuilder.PROTOTYPE.readFrom(in)); + } + return multiLineStringBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index 517cfa05048..4f2ad8cbc57 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -36,6 +36,8 @@ public class MultiPointBuilder extends PointCollection { public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT; + final static MultiPointBuilder PROTOTYPE = new MultiPointBuilder(); + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -90,7 +92,7 @@ public class MultiPointBuilder extends PointCollection { } @Override - public ShapeBuilder readFrom(StreamInput in) throws IOException { + public MultiPointBuilder readFrom(StreamInput in) throws IOException { MultiPointBuilder multiPointBuilder = new MultiPointBuilder(); int size = in.readVInt(); for (int i=0; i < size; i++) { diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index e7070333666..22c293f4939 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -32,6 +32,7 @@ import com.vividsolutions.jts.geom.Coordinate; public class PointBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POINT; + 
static final PointBuilder PROTOTYPE = new PointBuilder(); private Coordinate coordinate; diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java index ce1663fad79..3889e00c4ea 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -44,10 +44,12 @@ public abstract class AbstractShapeBuilderTestCase exte public static void init() { if (namedWriteableRegistry == null) { namedWriteableRegistry = new NamedWriteableRegistry(); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, new PointBuilder()); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, new CircleBuilder()); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, new EnvelopeBuilder()); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, new MultiPointBuilder()); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE); } } @@ -94,9 +96,9 @@ public abstract class AbstractShapeBuilderTestCase exte for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { SB testShape = createTestShapeBuilder(); SB deserializedShape = copyShape(testShape); - assertEquals(deserializedShape, testShape); - assertEquals(deserializedShape.hashCode(), testShape.hashCode()); - assertNotSame(deserializedShape, testShape); + 
assertEquals(testShape, deserializedShape); + assertEquals(testShape.hashCode(), deserializedShape.hashCode()); + assertNotSame(testShape, deserializedShape); } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java new file mode 100644 index 00000000000..e4e483c0fcb --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.geo.builders; + +import com.vividsolutions.jts.geom.Coordinate; + +import org.elasticsearch.test.geo.RandomShapeGenerator; +import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; + +import java.io.IOException; + +public class LineStringBuilderTests extends AbstractShapeBuilderTestCase { + + @Override + protected LineStringBuilder createTestShapeBuilder() { + LineStringBuilder lsb = (LineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.LINESTRING); + return lsb; + } + + @Override + protected LineStringBuilder mutate(LineStringBuilder original) throws IOException { + LineStringBuilder mutation = copyShape(original); + Coordinate[] coordinates = original.coordinates(false); + Coordinate coordinate = randomFrom(coordinates); + if (randomBoolean()) { + if (coordinate.x != 0.0) { + coordinate.x = coordinate.x / 2; + } else { + coordinate.x = randomDoubleBetween(-180.0, 180.0, true); + } + } else { + if (coordinate.y != 0.0) { + coordinate.y = coordinate.y / 2; + } else { + coordinate.y = randomDoubleBetween(-90.0, 90.0, true); + } + } + return mutation.points(coordinates); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java new file mode 100644 index 00000000000..07698683e87 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.geo.builders; + +import com.vividsolutions.jts.geom.Coordinate; + +import org.elasticsearch.test.geo.RandomShapeGenerator; +import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; + +import java.io.IOException; + +public class MultiLineStringBuilderTests extends AbstractShapeBuilderTestCase { + + @Override + protected MultiLineStringBuilder createTestShapeBuilder() { + return (MultiLineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.MULTILINESTRING); + } + + @Override + protected MultiLineStringBuilder mutate(MultiLineStringBuilder original) throws IOException { + MultiLineStringBuilder mutation = copyShape(original); + Coordinate[][] coordinates = mutation.coordinates(); + int lineToChange = randomInt(coordinates.length - 1); + for (int i = 0; i < coordinates.length; i++) { + Coordinate[] line = coordinates[i]; + if (i == lineToChange) { + Coordinate coordinate = randomFrom(line); + if (randomBoolean()) { + if (coordinate.x != 0.0) { + coordinate.x = coordinate.x / 2; + } else { + coordinate.x = randomDoubleBetween(-180.0, 180.0, true); + } + } else { + if (coordinate.y != 0.0) { + coordinate.y = coordinate.y / 2; + } else { + coordinate.y = randomDoubleBetween(-90.0, 90.0, true); + } + } + } + } + return mutation; + } +} From ae7e8bbaf0138f287430be741fbd8e013d8fb392 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 19 Nov 2015 17:08:32 +0100 Subject: [PATCH 132/167] Making PolygonBuilder writable and add equals/hashCode --- 
.../common/geo/builders/PolygonBuilder.java | 186 +++++------------- .../AbstractShapeBuilderTestCase.java | 3 +- .../geo/builders/LineStringBuilderTests.java | 3 + .../geo/builders/PolygonBuilderTests.java | 71 +++++++ 4 files changed, 127 insertions(+), 136 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index 04540df27e9..13b068b2e5b 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -29,6 +29,8 @@ import com.vividsolutions.jts.geom.MultiPolygon; import com.vividsolutions.jts.geom.Polygon; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,6 +41,8 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.List; +import java.util.Objects; /** * The {@link PolygonBuilder} implements the groundwork to create polygons. 
This contains @@ -48,6 +52,9 @@ import java.util.concurrent.atomic.AtomicBoolean; public class PolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POLYGON; + static final PolygonBuilder PROTOTYPE = new PolygonBuilder(); + + private static final Coordinate[][] EMPTY = new Coordinate[0][]; // line string defining the shell of the polygon private LineStringBuilder shell; @@ -103,6 +110,20 @@ public class PolygonBuilder extends ShapeBuilder { return this; } + /** + * @return the list of holes defined for this polygon + */ + public List holes() { + return this.holes; + } + + /** + * @return the list of points of the shell for this polygon + */ + public LineStringBuilder shell() { + return this.shell; + } + /** * Close the shell of the polygon */ @@ -357,8 +378,6 @@ public class PolygonBuilder extends ShapeBuilder { return result; } - private static final Coordinate[][] EMPTY = new Coordinate[0][]; - private static Coordinate[][] holes(Edge[] holes, int numHoles) { if (numHoles == 0) { return EMPTY; @@ -520,147 +539,44 @@ public class PolygonBuilder extends ShapeBuilder { return points.length-1; } - /** - * Create a connected list of a list of coordinates - * - * @param points - * array of point - * @param offset - * index of the first point - * @param length - * number of points - * @return Array of edges - */ - private static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell, - Coordinate[] points, int offset, Edge[] edges, int toffset, int length, final AtomicBoolean translated) { - // calculate the direction of the points: - // find the point a the top of the set and check its - // neighbors orientation. 
So direction is equivalent - // to clockwise/counterclockwise - final int top = top(points, offset, length); - final int prev = (offset + ((top + length - 1) % length)); - final int next = (offset + ((top + 1) % length)); - boolean orientation = points[offset + prev].x > points[offset + next].x; - - // OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness) - // since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards - // thus if orientation is computed as cw, the logic will translate points across dateline - // and convert to a right handed system - - // compute the bounding box and calculate range - double[] range = range(points, offset, length); - final double rng = range[1] - range[0]; - // translate the points if the following is true - // 1. shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres - // (translation would result in a collapsed poly) - // 2. the shell of the candidate hole has been translated (to preserve the coordinate system) - boolean incorrectOrientation = component == 0 && handedness != orientation; - if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (translated.get() && component != 0)) { - translate(points); - // flip the translation bit if the shell is being translated - if (component == 0) { - translated.set(true); - } - // correct the orientation post translation (ccw for shell, cw for holes) - if (component == 0 || (component != 0 && handedness == orientation)) { - orientation = !orientation; - } - } - return concat(component, direction ^ orientation, points, offset, edges, toffset, length); + @Override + public int hashCode() { + return Objects.hash(shell, holes, orientation); } - private static final int top(Coordinate[] points, int offset, int length) { - int top = 0; // we start at 1 here since top points to 0 - for (int i = 1; i < length; i++) { - if (points[offset + i].y < points[offset + top].y) { - 
top = i; - } else if (points[offset + i].y == points[offset + top].y) { - if (points[offset + i].x < points[offset + top].x) { - top = i; - } - } + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; } - return top; + if (obj == null || getClass() != obj.getClass()) { + return false; + } + PolygonBuilder other = (PolygonBuilder) obj; + return Objects.equals(shell, other.shell) && + Objects.equals(holes, other.holes) && + Objects.equals(orientation, other.orientation); } - private static final double[] range(Coordinate[] points, int offset, int length) { - double minX = points[0].x; - double maxX = points[0].x; - double minY = points[0].y; - double maxY = points[0].y; - // compute the bounding coordinates (@todo: cleanup brute force) - for (int i = 1; i < length; ++i) { - if (points[offset + i].x < minX) { - minX = points[offset + i].x; - } - if (points[offset + i].x > maxX) { - maxX = points[offset + i].x; - } - if (points[offset + i].y < minY) { - minY = points[offset + i].y; - } - if (points[offset + i].y > maxY) { - maxY = points[offset + i].y; - } + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(orientation == Orientation.RIGHT); + shell.writeTo(out); + out.writeVInt(holes.size()); + for (LineStringBuilder hole : holes) { + hole.writeTo(out); } - return new double[] {minX, maxX, minY, maxY}; } - /** - * Concatenate a set of points to a polygon - * - * @param component - * component id of the polygon - * @param direction - * direction of the ring - * @param points - * list of points to concatenate - * @param pointOffset - * index of the first point - * @param edges - * Array of edges to write the result to - * @param edgeOffset - * index of the first edge in the result - * @param length - * number of points to use - * @return the edges creates - */ - private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int 
edgeOffset, - int length) { - assert edges.length >= length+edgeOffset; - assert points.length >= length+pointOffset; - edges[edgeOffset] = new Edge(points[pointOffset], null); - for (int i = 1; i < length; i++) { - if (direction) { - edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]); - edges[edgeOffset + i].component = component; - } else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) { - edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null); - edges[edgeOffset + i - 1].component = component; - } else { - throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]); - } - } - - if (direction) { - edges[edgeOffset].setNext(edges[edgeOffset + length - 1]); - edges[edgeOffset].component = component; - } else { - edges[edgeOffset + length - 1].setNext(edges[edgeOffset]); - edges[edgeOffset + length - 1].component = component; - } - - return edges; - } - - /** - * Transforms coordinates in the eastern hemisphere (-180:0) to a (180:360) range - */ - private static void translate(Coordinate[] points) { - for (Coordinate c : points) { - if (c.x < 0) { - c.x += 2*DATELINE; - } + @Override + public PolygonBuilder readFrom(StreamInput in) throws IOException { + Orientation orientation = in.readBoolean() ? 
Orientation.RIGHT : Orientation.LEFT; + PolygonBuilder polyBuilder = new PolygonBuilder(orientation); + polyBuilder.shell = LineStringBuilder.PROTOTYPE.readFrom(in); + int holes = in.readVInt(); + for (int i = 0; i < holes; i++) { + polyBuilder.hole(LineStringBuilder.PROTOTYPE.readFrom(in)); } + return polyBuilder; } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java index 3889e00c4ea..7776c8dbb7a 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -34,7 +34,7 @@ import static org.hamcrest.Matchers.*; public abstract class AbstractShapeBuilderTestCase extends ESTestCase { - private static final int NUMBER_OF_TESTBUILDERS = 20; + private static final int NUMBER_OF_TESTBUILDERS = 1; private static NamedWriteableRegistry namedWriteableRegistry; /** @@ -50,6 +50,7 @@ public abstract class AbstractShapeBuilderTestCase exte namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE); namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE); namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE); } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java index e4e483c0fcb..3a1f458cd87 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java @@ -31,6 +31,9 @@ public class LineStringBuilderTests extends AbstractShapeBuilderTestCase { 
+ + @Override + protected PolygonBuilder createTestShapeBuilder() { + PolygonBuilder pgb = (PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON); + // NORELEASE translated might have been changed by createShape, but won't survive xContent->Parse roundtrip + pgb.shell().translated = false; + return pgb; + } + + @Override + protected PolygonBuilder mutate(PolygonBuilder original) throws IOException { + PolygonBuilder mutation = copyShape(original); + if (randomBoolean()) { + // toggle orientation + mutation.orientation = (original.orientation == Orientation.LEFT ? Orientation.RIGHT : Orientation.LEFT); + } else { + // change either point in shell or in random hole + LineStringBuilder lineToChange; + if (randomBoolean() || mutation.holes().size() == 0) { + lineToChange = mutation.shell(); + } else { + lineToChange = randomFrom(mutation.holes()); + } + Coordinate coordinate = randomFrom(lineToChange.coordinates(false)); + if (randomBoolean()) { + if (coordinate.x != 0.0) { + coordinate.x = coordinate.x / 2; + } else { + coordinate.x = randomDoubleBetween(-180.0, 180.0, true); + } + } else { + if (coordinate.y != 0.0) { + coordinate.y = coordinate.y / 2; + } else { + coordinate.y = randomDoubleBetween(-90.0, 90.0, true); + } + } + } + return mutation; + } +} From 1f5ee642fd2d1d72b75f608148df779072850b4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 19 Nov 2015 18:35:35 +0100 Subject: [PATCH 133/167] Making MultiPolygonBuilder writable and adding equals/hashCode --- .../geo/builders/MultiPolygonBuilder.java | 53 +++++++++++++++++- .../common/geo/builders/PolygonBuilder.java | 2 + .../AbstractShapeBuilderTestCase.java | 3 +- .../builders/MultiPolygonBuilderTests.java | 56 +++++++++++++++++++ .../geo/builders/PolygonBuilderTests.java | 15 +++-- 5 files changed, 122 insertions(+), 7 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java diff --git 
a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index e7762e51b61..e6525f770f7 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -22,8 +22,12 @@ package org.elasticsearch.common.geo.builders; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; +import java.util.Objects; import org.elasticsearch.common.geo.XShapeCollection; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import com.spatial4j.core.shape.Shape; @@ -32,8 +36,9 @@ import com.vividsolutions.jts.geom.Coordinate; public class MultiPolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON; + static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder(); - protected final ArrayList polygons = new ArrayList<>(); + private final ArrayList polygons = new ArrayList<>(); public MultiPolygonBuilder() { this(Orientation.RIGHT); @@ -48,10 +53,18 @@ public class MultiPolygonBuilder extends ShapeBuilder { return this; } + /** + * get the list of polygons + */ + public ArrayList polygons() { + return polygons; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(FIELD_TYPE, TYPE.shapeName()); + builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT)); builder.startArray(FIELD_COORDINATES); for(PolygonBuilder polygon : polygons) { builder.startArray(); @@ -89,4 +102,42 @@ public class MultiPolygonBuilder extends ShapeBuilder { return new XShapeCollection<>(shapes, SPATIAL_CONTEXT); //note: ShapeCollection is probably faster than 
a Multi* geom. } + + @Override + public int hashCode() { + return Objects.hash(polygons, orientation); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + MultiPolygonBuilder other = (MultiPolygonBuilder) obj; + return Objects.equals(polygons, other.polygons) && + Objects.equals(orientation, other.orientation); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(orientation == Orientation.RIGHT); + out.writeVInt(polygons.size()); + for (PolygonBuilder polygon : polygons) { + polygon.writeTo(out); + } + } + + @Override + public MultiPolygonBuilder readFrom(StreamInput in) throws IOException { + Orientation orientation = in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT; + MultiPolygonBuilder polyBuilder = new MultiPolygonBuilder(orientation); + int holes = in.readVInt(); + for (int i = 0; i < holes; i++) { + polyBuilder.polygon(PolygonBuilder.PROTOTYPE.readFrom(in)); + } + return polyBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index 13b068b2e5b..81555aa716a 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -42,6 +42,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.concurrent.atomic.AtomicBoolean; import java.util.List; +import java.util.Locale; import java.util.Objects; /** @@ -196,6 +197,7 @@ public class PolygonBuilder extends ShapeBuilder { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(FIELD_TYPE, TYPE.shapeName()); + builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT)); 
builder.startArray(FIELD_COORDINATES); coordinatesArray(builder, params); builder.endArray(); diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java index 7776c8dbb7a..d0f9914ef7d 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -34,7 +34,7 @@ import static org.hamcrest.Matchers.*; public abstract class AbstractShapeBuilderTestCase extends ESTestCase { - private static final int NUMBER_OF_TESTBUILDERS = 1; + private static final int NUMBER_OF_TESTBUILDERS = 20; private static NamedWriteableRegistry namedWriteableRegistry; /** @@ -51,6 +51,7 @@ public abstract class AbstractShapeBuilderTestCase exte namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE); namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE); namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE); } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java new file mode 100644 index 00000000000..eb174d1aeef --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.geo.builders; + +import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation; +import org.elasticsearch.test.geo.RandomShapeGenerator; +import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; + +import java.io.IOException; + +public class MultiPolygonBuilderTests extends AbstractShapeBuilderTestCase { + + @Override + protected MultiPolygonBuilder createTestShapeBuilder() { + MultiPolygonBuilder mpb = new MultiPolygonBuilder(randomFrom(Orientation.values())); + int polys = randomIntBetween(1, 10); + for (int i = 0; i < polys; i++) { + PolygonBuilder pgb = (PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON); + pgb.orientation = mpb.orientation; + // NORELEASE translated might have been changed by createShape, but won't survive xContent->Parse roundtrip + pgb.shell().translated = false; + mpb.polygon(pgb); + } + return mpb; + } + + @Override + protected MultiPolygonBuilder mutate(MultiPolygonBuilder original) throws IOException { + MultiPolygonBuilder mutation = copyShape(original); + if (randomBoolean()) { + // toggle orientation + mutation.orientation = (original.orientation == Orientation.LEFT ? 
Orientation.RIGHT : Orientation.LEFT); + } else { + int polyToChange = randomInt(mutation.polygons().size() - 1); + PolygonBuilderTests.mutatePolygonBuilder(mutation.polygons().get(polyToChange)); + } + return mutation; + } +} diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java index e6e4ae38429..e95adb40e73 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java @@ -32,6 +32,7 @@ public class PolygonBuilderTests extends AbstractShapeBuilderTestCaseParse roundtrip pgb.shell().translated = false; return pgb; @@ -40,16 +41,20 @@ public class PolygonBuilderTests extends AbstractShapeBuilderTestCase Date: Mon, 23 Nov 2015 15:36:42 +0100 Subject: [PATCH 134/167] Adding serialization, equals and hashCode to GeometryCollectionBuilder --- .../common/geo/builders/EnvelopeBuilder.java | 5 +- .../builders/GeometryCollectionBuilder.java | 44 +++++++ .../geo/builders/MultiPolygonBuilder.java | 5 +- .../common/geo/builders/PolygonBuilder.java | 5 +- .../common/geo/builders/ShapeBuilder.java | 8 ++ .../common/io/stream/StreamInput.java | 9 ++ .../common/io/stream/StreamOutput.java | 8 ++ ...lderTests.java => CircleBuilderTests.java} | 4 +- .../geo/builders/EnvelopeBuilderTests.java | 2 + .../GeometryCollectionBuilderTests.java | 111 ++++++++++++++++++ .../geo/builders/LineStringBuilderTests.java | 2 + .../builders/MultiLineStringBuilderTests.java | 2 + .../geo/builders/MultiPointBuilderTests.java | 2 + .../builders/MultiPolygonBuilderTests.java | 2 + .../geo/builders/PointBuilderTests.java | 2 + .../geo/builders/PolygonBuilderTests.java | 2 + 16 files changed, 203 insertions(+), 10 deletions(-) rename core/src/test/java/org/elasticsearch/common/geo/builders/{CirlceBuilderTests.java => CircleBuilderTests.java} (94%) create mode 
100644 core/src/test/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilderTests.java diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 62eea070032..793e24d37b1 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -108,15 +108,14 @@ public class EnvelopeBuilder extends ShapeBuilder { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(orientation == Orientation.RIGHT); + orientation.writeTo(out); writeCoordinateTo(topLeft, out); writeCoordinateTo(bottomRight, out); } @Override public EnvelopeBuilder readFrom(StreamInput in) throws IOException { - Orientation orientation = in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT; - return new EnvelopeBuilder(orientation) + return new EnvelopeBuilder(Orientation.readFrom(in)) .topLeft(readCoordinateFrom(in)) .bottomRight(readCoordinateFrom(in)); } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java index 45397ed962f..e4db15998fd 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java @@ -20,18 +20,25 @@ package org.elasticsearch.common.geo.builders; import com.spatial4j.core.shape.Shape; + import org.elasticsearch.common.geo.XShapeCollection; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import 
java.util.Locale; +import java.util.Objects; public class GeometryCollectionBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION; + public static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder(); + protected final ArrayList shapes = new ArrayList<>(); public GeometryCollectionBuilder() { @@ -103,6 +110,7 @@ public class GeometryCollectionBuilder extends ShapeBuilder { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(FIELD_TYPE, TYPE.shapeName()); + builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT)); builder.startArray(FIELD_GEOMETRIES); for (ShapeBuilder shape : shapes) { shape.toXContent(builder, params); @@ -132,4 +140,40 @@ public class GeometryCollectionBuilder extends ShapeBuilder { //note: ShapeCollection is probably faster than a Multi* geom. } + @Override + public int hashCode() { + return Objects.hash(orientation, shapes); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + GeometryCollectionBuilder other = (GeometryCollectionBuilder) obj; + return Objects.equals(orientation, other.orientation) && Objects.equals(shapes, other.shapes); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + orientation.writeTo(out); + out.writeVInt(shapes.size()); + for (ShapeBuilder shape : shapes) { + out.writeShape(shape); + } + } + + @Override + public GeometryCollectionBuilder readFrom(StreamInput in) throws IOException { + GeometryCollectionBuilder geometryCollectionBuilder = new GeometryCollectionBuilder(Orientation.readFrom(in)); + int shapes = in.readVInt(); + for (int i = 0; i < shapes; i++) { + geometryCollectionBuilder.shape(in.readShape()); + } + return geometryCollectionBuilder; + } + } diff --git 
a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index e6525f770f7..46d39bdbba2 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -123,7 +123,7 @@ public class MultiPolygonBuilder extends ShapeBuilder { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(orientation == Orientation.RIGHT); + orientation.writeTo(out); out.writeVInt(polygons.size()); for (PolygonBuilder polygon : polygons) { polygon.writeTo(out); @@ -132,8 +132,7 @@ public class MultiPolygonBuilder extends ShapeBuilder { @Override public MultiPolygonBuilder readFrom(StreamInput in) throws IOException { - Orientation orientation = in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT; - MultiPolygonBuilder polyBuilder = new MultiPolygonBuilder(orientation); + MultiPolygonBuilder polyBuilder = new MultiPolygonBuilder(Orientation.readFrom(in)); int holes = in.readVInt(); for (int i = 0; i < holes; i++) { polyBuilder.polygon(PolygonBuilder.PROTOTYPE.readFrom(in)); diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index 81555aa716a..5aff34dbda5 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -562,7 +562,7 @@ public class PolygonBuilder extends ShapeBuilder { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(orientation == Orientation.RIGHT); + orientation.writeTo(out); shell.writeTo(out); out.writeVInt(holes.size()); for (LineStringBuilder hole : holes) { @@ -572,8 +572,7 @@ public class PolygonBuilder extends ShapeBuilder { @Override 
public PolygonBuilder readFrom(StreamInput in) throws IOException { - Orientation orientation = in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT; - PolygonBuilder polyBuilder = new PolygonBuilder(orientation); + PolygonBuilder polyBuilder = new PolygonBuilder(Orientation.readFrom(in)); polyBuilder.shell = LineStringBuilder.PROTOTYPE.readFrom(in); int holes = in.readVInt(); for (int i = 0; i < holes; i++) { diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index d8689ee737f..40c54efc7f9 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -408,6 +408,14 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri public static final Orientation COUNTER_CLOCKWISE = Orientation.RIGHT; public static final Orientation CW = Orientation.LEFT; public static final Orientation CCW = Orientation.RIGHT; + + public void writeTo (StreamOutput out) throws IOException { + out.writeBoolean(this == Orientation.RIGHT); + } + + public static Orientation readFrom (StreamInput in) throws IOException { + return in.readBoolean() ? 
Orientation.RIGHT : Orientation.LEFT; + } } public static final String FIELD_TYPE = "type"; diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 76438fb8b6b..f8072804225 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -34,6 +34,8 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.geo.builders.ShapeBuilder; +import org.elasticsearch.common.text.StringAndBytesText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; @@ -629,6 +631,13 @@ public abstract class StreamInput extends InputStream { return readNamedWriteable(QueryBuilder.class); } + /** + * Reads a {@link ShapeBuilder} from the current stream + */ + public ShapeBuilder readShape() throws IOException { + return readNamedWriteable(ShapeBuilder.class); + } + /** * Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream */ diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 5f1e7623d28..e8997b8073f 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -32,6 +32,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.geo.builders.ShapeBuilder; import 
org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; @@ -618,6 +619,13 @@ public abstract class StreamOutput extends OutputStream { writeNamedWriteable(queryBuilder); } + /** + * Writes a {@link ShapeBuilder} to the current stream + */ + public void writeShape(ShapeBuilder shapeBuilder) throws IOException { + writeNamedWriteable(shapeBuilder); + } + /** * Writes a {@link ScoreFunctionBuilder} to the current stream */ diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/CirlceBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java similarity index 94% rename from core/src/test/java/org/elasticsearch/common/geo/builders/CirlceBuilderTests.java rename to core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java index 6b102b87b2c..9b6bd27b472 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/CirlceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java @@ -25,7 +25,9 @@ import org.elasticsearch.common.unit.DistanceUnit; import java.io.IOException; -public class CirlceBuilderTests extends AbstractShapeBuilderTestCase { +public class CircleBuilderTests extends AbstractShapeBuilderTestCase { + + static CircleBuilderTests PROTOTYPE = new CircleBuilderTests(); @Override protected CircleBuilder createTestShapeBuilder() { diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java index e6f3db2f8af..a8794fbd687 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java @@ -29,6 +29,8 @@ import java.io.IOException; public class EnvelopeBuilderTests extends 
AbstractShapeBuilderTestCase { + static final EnvelopeBuilderTests PROTOTYPE = new EnvelopeBuilderTests(); + @Override protected EnvelopeBuilder createTestShapeBuilder() { EnvelopeBuilder envelope = new EnvelopeBuilder(randomFrom(Orientation.values())); diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilderTests.java new file mode 100644 index 00000000000..46b4044de8f --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilderTests.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.geo.builders; + +import org.elasticsearch.test.geo.RandomShapeGenerator; + +import java.io.IOException; + +public class GeometryCollectionBuilderTests extends AbstractShapeBuilderTestCase { + + @Override + protected GeometryCollectionBuilder createTestShapeBuilder() { + // NORELEASE check of GeometryCollectionBuilder should parse maintain orientation + GeometryCollectionBuilder geometryCollection = new GeometryCollectionBuilder(); + int shapes = randomIntBetween(0, 8); + for (int i = 0; i < shapes; i++) { + switch (randomIntBetween(0, 7)) { + case 0: + geometryCollection.shape(PointBuilderTests.PROTOTYPE.createTestShapeBuilder()); + break; + case 1: + geometryCollection.shape(CircleBuilderTests.PROTOTYPE.createTestShapeBuilder()); + break; + case 2: + geometryCollection.shape(EnvelopeBuilderTests.PROTOTYPE.createTestShapeBuilder()); + break; + case 3: + geometryCollection.shape(LineStringBuilderTests.PROTOTYPE.createTestShapeBuilder()); + break; + case 4: + geometryCollection.shape(MultiLineStringBuilderTests.PROTOTYPE.createTestShapeBuilder()); + break; + case 5: + geometryCollection.shape(MultiPolygonBuilderTests.PROTOTYPE.createTestShapeBuilder()); + break; + case 6: + geometryCollection.shape(MultiPointBuilderTests.PROTOTYPE.createTestShapeBuilder()); + break; + case 7: + geometryCollection.shape(PolygonBuilderTests.PROTOTYPE.createTestShapeBuilder()); + break; + } + } + return geometryCollection; + } + + @Override + protected GeometryCollectionBuilder mutate(GeometryCollectionBuilder original) throws IOException { + GeometryCollectionBuilder mutation = copyShape(original); + // NORELEASE check of GeometryCollectionBuilder should parse maintain orientation +// if (randomBoolean()) { +// // toggle orientation +// mutation.orientation = (mutation.orientation == Orientation.LEFT ? 
Orientation.RIGHT : Orientation.LEFT); +// } else { + // change one shape + if (mutation.shapes.size() > 0) { + int shapePosition = randomIntBetween(0, mutation.shapes.size() - 1); + ShapeBuilder shapeToChange = mutation.shapes.get(shapePosition); + switch (shapeToChange.type()) { + case POINT: + shapeToChange = PointBuilderTests.PROTOTYPE.mutate((PointBuilder) shapeToChange); + break; + case CIRCLE: + shapeToChange = CircleBuilderTests.PROTOTYPE.mutate((CircleBuilder) shapeToChange); + break; + case ENVELOPE: + shapeToChange = EnvelopeBuilderTests.PROTOTYPE.mutate((EnvelopeBuilder) shapeToChange); + break; + case LINESTRING: + shapeToChange = LineStringBuilderTests.PROTOTYPE.mutate((LineStringBuilder) shapeToChange); + break; + case MULTILINESTRING: + shapeToChange = MultiLineStringBuilderTests.PROTOTYPE.mutate((MultiLineStringBuilder) shapeToChange); + break; + case MULTIPOLYGON: + shapeToChange = MultiPolygonBuilderTests.PROTOTYPE.mutate((MultiPolygonBuilder) shapeToChange); + break; + case MULTIPOINT: + shapeToChange = MultiPointBuilderTests.PROTOTYPE.mutate((MultiPointBuilder) shapeToChange); + break; + case POLYGON: + shapeToChange = PolygonBuilderTests.PROTOTYPE.mutate((PolygonBuilder) shapeToChange); + break; + case GEOMETRYCOLLECTION: + throw new UnsupportedOperationException("GeometryCollection should not be nested inside each other"); + } + mutation.shapes.set(shapePosition, shapeToChange); + } else { + mutation.shape(RandomShapeGenerator.createShape(getRandom())); + } +// } + return mutation; + } +} diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java index 3a1f458cd87..72138799881 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java @@ -28,6 +28,8 @@ import java.io.IOException; public 
class LineStringBuilderTests extends AbstractShapeBuilderTestCase { + static final LineStringBuilderTests PROTOTYPE = new LineStringBuilderTests(); + @Override protected LineStringBuilder createTestShapeBuilder() { LineStringBuilder lsb = (LineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.LINESTRING); diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java index 07698683e87..b43ffb42e6f 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java @@ -28,6 +28,8 @@ import java.io.IOException; public class MultiLineStringBuilderTests extends AbstractShapeBuilderTestCase { + static final MultiLineStringBuilderTests PROTOTYPE = new MultiLineStringBuilderTests(); + @Override protected MultiLineStringBuilder createTestShapeBuilder() { return (MultiLineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.MULTILINESTRING); diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java index 49c256a4362..952d5d50ec3 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java @@ -28,6 +28,8 @@ import java.io.IOException; public class MultiPointBuilderTests extends AbstractShapeBuilderTestCase { + static final MultiPointBuilderTests PROTOTYPE = new MultiPointBuilderTests(); + @Override protected MultiPointBuilder createTestShapeBuilder() { return (MultiPointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.MULTIPOINT); diff --git 
a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java index eb174d1aeef..1b8e813e956 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java @@ -27,6 +27,8 @@ import java.io.IOException; public class MultiPolygonBuilderTests extends AbstractShapeBuilderTestCase { + static final MultiPolygonBuilderTests PROTOTYPE = new MultiPolygonBuilderTests(); + @Override protected MultiPolygonBuilder createTestShapeBuilder() { MultiPolygonBuilder mpb = new MultiPolygonBuilder(randomFrom(Orientation.values())); diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java index 1e94a1bab3a..a7e07155ce8 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java @@ -26,6 +26,8 @@ import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; public class PointBuilderTests extends AbstractShapeBuilderTestCase { + static PointBuilderTests PROTOTYPE = new PointBuilderTests(); + @Override protected PointBuilder createTestShapeBuilder() { return (PointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POINT); diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java index e95adb40e73..2c9ce090535 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java @@ -29,6 +29,8 @@ import java.io.IOException; public class PolygonBuilderTests extends 
AbstractShapeBuilderTestCase { + static final PolygonBuilderTests PROTOTYPE = new PolygonBuilderTests(); + @Override protected PolygonBuilder createTestShapeBuilder() { PolygonBuilder pgb = (PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON); From f07e61c05b0d475c0847bb699acb2f5db72d822e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Mon, 23 Nov 2015 18:38:06 +0100 Subject: [PATCH 135/167] Make GeoShapeQueryBuilder serialize shape --- .../common/geo/builders/CircleBuilder.java | 2 +- .../common/geo/builders/EnvelopeBuilder.java | 3 +- .../geo/builders/LineStringBuilder.java | 10 +-- .../geo/builders/MultiLineStringBuilder.java | 2 +- .../geo/builders/MultiPointBuilder.java | 2 +- .../geo/builders/MultiPolygonBuilder.java | 2 +- .../common/geo/builders/PointBuilder.java | 2 +- .../common/geo/builders/PolygonBuilder.java | 2 +- .../common/geo/builders/ShapeBuilder.java | 11 --- .../geo/builders/ShapeBuilderRegistry.java | 42 ++++++++++ .../index/query/GeoShapeQueryBuilder.java | 78 ++++++------------- .../index/query/GeoShapeQueryParser.java | 9 +-- .../elasticsearch/indices/IndicesModule.java | 2 + .../AbstractShapeBuilderTestCase.java | 1 + .../geo/builders/CircleBuilderTests.java | 2 +- .../builders/MultiPolygonBuilderTests.java | 2 +- .../geo/builders/PointBuilderTests.java | 4 +- .../geo/builders/PolygonBuilderTests.java | 2 +- .../index/query/AbstractQueryTestCase.java | 2 + .../query/GeoShapeQueryBuilderTests.java | 37 +++++---- 20 files changed, 113 insertions(+), 104 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java index ddafecc4e7f..5f11d12a4bf 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java +++ 
b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java @@ -36,7 +36,7 @@ public class CircleBuilder extends ShapeBuilder { public static final String FIELD_RADIUS = "radius"; public static final GeoShapeType TYPE = GeoShapeType.CIRCLE; - static final CircleBuilder PROTOTYPE = new CircleBuilder(); + public static final CircleBuilder PROTOTYPE = new CircleBuilder(); private DistanceUnit unit; private double radius; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 793e24d37b1..82fd3275116 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -25,7 +25,6 @@ import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; - import java.io.IOException; import java.util.Locale; import java.util.Objects; @@ -34,7 +33,7 @@ public class EnvelopeBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE; - static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(); + public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(); protected Coordinate topLeft; protected Coordinate bottomRight; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index 71d9bee8686..cec2a66e757 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -38,7 +38,7 @@ public class LineStringBuilder extends PointCollection { public static final GeoShapeType TYPE = GeoShapeType.LINESTRING; - 
static final LineStringBuilder PROTOTYPE = new LineStringBuilder(); + public static final LineStringBuilder PROTOTYPE = new LineStringBuilder(); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { @@ -148,7 +148,7 @@ public class LineStringBuilder extends PointCollection { @Override public int hashCode() { - return Objects.hash(points, translated); + return Objects.hash(points, translated()); } @Override @@ -161,7 +161,7 @@ public class LineStringBuilder extends PointCollection { } LineStringBuilder other = (LineStringBuilder) obj; return Objects.equals(points, other.points) && - (translated == other.translated); + (translated() == other.translated()); } @Override @@ -170,7 +170,7 @@ public class LineStringBuilder extends PointCollection { for (Coordinate point : points) { writeCoordinateTo(point, out); } - out.writeBoolean(translated); + out.writeBoolean(translated()); } @Override @@ -180,7 +180,7 @@ public class LineStringBuilder extends PointCollection { for (int i=0; i < size; i++) { lineStringBuilder.point(readCoordinateFrom(in)); } - lineStringBuilder.translated = in.readBoolean(); + lineStringBuilder.translated(in.readBoolean()); return lineStringBuilder; } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java index 16b36d81037..c0a79611dec 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -37,7 +37,7 @@ public class MultiLineStringBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING; - static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder(); + public static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder(); private final ArrayList lines = new 
ArrayList<>(); diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index 4f2ad8cbc57..f1b403d42b9 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -36,7 +36,7 @@ public class MultiPointBuilder extends PointCollection { public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT; - final static MultiPointBuilder PROTOTYPE = new MultiPointBuilder(); + public final static MultiPointBuilder PROTOTYPE = new MultiPointBuilder(); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index 46d39bdbba2..a82c0fad9bd 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -36,7 +36,7 @@ import com.vividsolutions.jts.geom.Coordinate; public class MultiPolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON; - static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder(); + public static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder(); private final ArrayList polygons = new ArrayList<>(); diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index 22c293f4939..35225461658 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -32,7 +32,7 @@ import 
com.vividsolutions.jts.geom.Coordinate; public class PointBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POINT; - static final PointBuilder PROTOTYPE = new PointBuilder(); + public static final PointBuilder PROTOTYPE = new PointBuilder(); private Coordinate coordinate; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index 5aff34dbda5..80591dbb3eb 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -53,7 +53,7 @@ import java.util.Objects; public class PolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POLYGON; - static final PolygonBuilder PROTOTYPE = new PolygonBuilder(); + public static final PolygonBuilder PROTOTYPE = new PolygonBuilder(); private static final Coordinate[][] EMPTY = new Coordinate[0][]; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index 40c54efc7f9..1ab568d4be1 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -708,15 +708,4 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri public String getWriteableName() { return type().shapeName(); } - - // NORELEASE this should be deleted as soon as all shape builders implement writable - @Override - public void writeTo(StreamOutput out) throws IOException { - } - - // NORELEASE this should be deleted as soon as all shape builders implement writable - @Override - public ShapeBuilder readFrom(StreamInput in) throws IOException { - return null; - } } diff --git 
a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java new file mode 100644 index 00000000000..a900b6a9ee5 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.geo.builders; + +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; + +/** + * Register the shape builder prototypes with the {@link NamedWriteableRegistry} + */ +public class ShapeBuilderRegistry { + + @Inject + public ShapeBuilderRegistry(NamedWriteableRegistry namedWriteableRegistry) { + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE); + } +} diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java index 5aad36cd27a..454465727b7 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java @@ -31,19 +31,16 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.ShapeRelation; import 
org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.geo.SpatialStrategy; +import org.elasticsearch.common.geo.builders.PointBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper; import org.elasticsearch.search.internal.SearchContext; @@ -61,13 +58,11 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder { String fieldName = null; ShapeRelation shapeRelation = null; SpatialStrategy strategy = null; - BytesReference shape = null; + ShapeBuilder shape = null; String id = null; String type = null; @@ -79,8 +77,7 @@ public class GeoShapeQueryParser implements QueryParser { currentFieldName = parser.currentName(); token = parser.nextToken(); if (parseContext.parseFieldMatcher().match(currentFieldName, SHAPE_FIELD)) { - XContentBuilder builder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - shape = builder.bytes(); + shape = ShapeBuilder.parse(parser); } else if (parseContext.parseFieldMatcher().match(currentFieldName, STRATEGY_FIELD)) { String strategyName = parser.text(); strategy = SpatialStrategy.fromString(strategyName); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index 6878002c015..3faeb6fb3df 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -22,6 +22,7 @@ package org.elasticsearch.indices; 
import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.common.geo.ShapesAvailability; +import org.elasticsearch.common.geo.builders.ShapeBuilderRegistry; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.index.NodeServicesProvider; @@ -218,6 +219,7 @@ public class IndicesModule extends AbstractModule { bind(IndicesFieldDataCacheListener.class).asEagerSingleton(); bind(TermVectorsService.class).asEagerSingleton(); bind(NodeServicesProvider.class).asEagerSingleton(); + bind(ShapeBuilderRegistry.class).asEagerSingleton(); } // public for testing diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java index d0f9914ef7d..a7fbfb8e380 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -52,6 +52,7 @@ public abstract class AbstractShapeBuilderTestCase exte namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE); namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE); namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE); } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java index 9b6bd27b472..17ed5e19876 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java 
@@ -27,7 +27,7 @@ import java.io.IOException; public class CircleBuilderTests extends AbstractShapeBuilderTestCase { - static CircleBuilderTests PROTOTYPE = new CircleBuilderTests(); + final static CircleBuilderTests PROTOTYPE = new CircleBuilderTests(); @Override protected CircleBuilder createTestShapeBuilder() { diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java index 1b8e813e956..4429dca608a 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java @@ -37,7 +37,7 @@ public class MultiPolygonBuilderTests extends AbstractShapeBuilderTestCaseParse roundtrip - pgb.shell().translated = false; + pgb.shell().translated(false); mpb.polygon(pgb); } return mpb; diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java index a7e07155ce8..e1bc86d57a4 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; public class PointBuilderTests extends AbstractShapeBuilderTestCase { - static PointBuilderTests PROTOTYPE = new PointBuilderTests(); + final static PointBuilderTests PROTOTYPE = new PointBuilderTests(); @Override protected PointBuilder createTestShapeBuilder() { @@ -35,6 +35,6 @@ public class PointBuilderTests extends AbstractShapeBuilderTestCaseParse roundtrip - pgb.shell().translated = false; + pgb.shell().translated(false); return pgb; } diff --git a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java 
b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java index aa97d722737..2d250ff0b95 100644 --- a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java @@ -47,6 +47,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.geo.builders.ShapeBuilderRegistry; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.ModulesBuilder; @@ -191,6 +192,7 @@ public abstract class AbstractQueryTestCase> // skip services bindQueryParsersExtension(); bindMapperExtension(); + bind(ShapeBuilderRegistry.class).asEagerSingleton(); } }, new ScriptModule(settings) { diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java index 6b2088d25c6..55292fefc7a 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.SpatialStrategy; import org.elasticsearch.common.geo.builders.EnvelopeBuilder; +import org.elasticsearch.common.geo.builders.PolygonBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilders; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -59,14 +60,17 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase Date: Fri, 4 Dec 2015 13:33:30 +0100 Subject: [PATCH 136/167] Changes after rebase on master 
--- .../geo/builders/LineStringBuilder.java | 7 +- .../geo/builders/MultiLineStringBuilder.java | 2 +- .../geo/builders/MultiPointBuilder.java | 2 +- .../common/geo/builders/PolygonBuilder.java | 144 ++++++++++++++++++ .../AbstractShapeBuilderTestCase.java | 18 +-- .../geo/builders/CircleBuilderTests.java | 24 +-- .../geo/builders/EnvelopeBuilderTests.java | 24 +-- .../GeometryCollectionBuilderTests.java | 40 ++--- .../geo/builders/LineStringBuilderTests.java | 24 +-- .../builders/MultiLineStringBuilderTests.java | 16 +- .../geo/builders/MultiPointBuilderTests.java | 16 +- .../builders/MultiPolygonBuilderTests.java | 32 ++-- .../geo/builders/PointBuilderTests.java | 18 ++- .../geo/builders/PolygonBuilderTests.java | 22 +-- .../query/GeoShapeQueryBuilderTests.java | 7 - 15 files changed, 291 insertions(+), 105 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index cec2a66e757..464d72c8d8c 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -148,7 +148,7 @@ public class LineStringBuilder extends PointCollection { @Override public int hashCode() { - return Objects.hash(points, translated()); + return Objects.hash(points); } @Override @@ -160,8 +160,7 @@ public class LineStringBuilder extends PointCollection { return false; } LineStringBuilder other = (LineStringBuilder) obj; - return Objects.equals(points, other.points) && - (translated() == other.translated()); + return Objects.equals(points, other.points); } @Override @@ -170,7 +169,6 @@ public class LineStringBuilder extends PointCollection { for (Coordinate point : points) { writeCoordinateTo(point, out); } - out.writeBoolean(translated()); } @Override @@ -180,7 +178,6 @@ public class LineStringBuilder extends PointCollection { for (int i=0; i < 
size; i++) { lineStringBuilder.point(readCoordinateFrom(in)); } - lineStringBuilder.translated(in.readBoolean()); return lineStringBuilder; } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java index c0a79611dec..4703ac19b08 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -131,7 +131,7 @@ public class MultiLineStringBuilder extends ShapeBuilder { public MultiLineStringBuilder readFrom(StreamInput in) throws IOException { MultiLineStringBuilder multiLineStringBuilder = new MultiLineStringBuilder(); int size = in.readVInt(); - for (int i=0; i < size; i++) { + for (int i = 0; i < size; i++) { multiLineStringBuilder.linestring(LineStringBuilder.PROTOTYPE.readFrom(in)); } return multiLineStringBuilder; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index f1b403d42b9..a4d236e3557 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -56,7 +56,7 @@ public class MultiPointBuilder extends PointCollection { for (Coordinate coord : points) { shapes.add(SPATIAL_CONTEXT.makePoint(coord.x, coord.y)); } - XShapeCollection multiPoints = new XShapeCollection<>(shapes, SPATIAL_CONTEXT); + XShapeCollection multiPoints = new XShapeCollection<>(shapes, SPATIAL_CONTEXT); multiPoints.setPointsOnly(true); return multiPoints; } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index 80591dbb3eb..fefbcb348ca 100644 --- 
a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -541,6 +541,150 @@ public class PolygonBuilder extends ShapeBuilder { return points.length-1; } + /** + * Create a connected list of a list of coordinates + * + * @param points + * array of points + * @param offset + * index of the first point + * @param length + * number of points + * @return Array of edges + */ + private static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell, + Coordinate[] points, int offset, Edge[] edges, int toffset, int length, final AtomicBoolean translated) { + // calculate the direction of the points: + // find the point at the top of the set and check its + // neighbors orientation. So direction is equivalent + // to clockwise/counterclockwise + final int top = top(points, offset, length); + final int prev = (offset + ((top + length - 1) % length)); + final int next = (offset + ((top + 1) % length)); + boolean orientation = points[offset + prev].x > points[offset + next].x; + + // OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness) + // since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards + // thus if orientation is computed as cw, the logic will translate points across dateline + // and convert to a right handed system + + // compute the bounding box and calculate range + double[] range = range(points, offset, length); + final double rng = range[1] - range[0]; + // translate the points if the following is true + // 1. shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres + // (translation would result in a collapsed poly) + // 2. 
the shell of the candidate hole has been translated (to preserve the coordinate system) + boolean incorrectOrientation = component == 0 && handedness != orientation; + if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (translated.get() && component != 0)) { + translate(points); + // flip the translation bit if the shell is being translated + if (component == 0) { + translated.set(true); + } + // correct the orientation post translation (ccw for shell, cw for holes) + if (component == 0 || (component != 0 && handedness == orientation)) { + orientation = !orientation; + } + } + return concat(component, direction ^ orientation, points, offset, edges, toffset, length); + } + + private static final int top(Coordinate[] points, int offset, int length) { + int top = 0; // we start at 1 here since top points to 0 + for (int i = 1; i < length; i++) { + if (points[offset + i].y < points[offset + top].y) { + top = i; + } else if (points[offset + i].y == points[offset + top].y) { + if (points[offset + i].x < points[offset + top].x) { + top = i; + } + } + } + return top; + } + + private static final double[] range(Coordinate[] points, int offset, int length) { + double minX = points[0].x; + double maxX = points[0].x; + double minY = points[0].y; + double maxY = points[0].y; + // compute the bounding coordinates (@todo: cleanup brute force) + for (int i = 1; i < length; ++i) { + if (points[offset + i].x < minX) { + minX = points[offset + i].x; + } + if (points[offset + i].x > maxX) { + maxX = points[offset + i].x; + } + if (points[offset + i].y < minY) { + minY = points[offset + i].y; + } + if (points[offset + i].y > maxY) { + maxY = points[offset + i].y; + } + } + return new double[] {minX, maxX, minY, maxY}; + } + + /** + * Concatenate a set of points to a polygon + * + * @param component + * component id of the polygon + * @param direction + * direction of the ring + * @param points + * list of points to concatenate + * @param pointOffset + * index of 
the first point + * @param edges + * Array of edges to write the result to + * @param edgeOffset + * index of the first edge in the result + * @param length + * number of points to use + * @return the edges created + */ + private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset, + int length) { + assert edges.length >= length+edgeOffset; + assert points.length >= length+pointOffset; + edges[edgeOffset] = new Edge(points[pointOffset], null); + for (int i = 1; i < length; i++) { + if (direction) { + edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]); + edges[edgeOffset + i].component = component; + } else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) { + edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null); + edges[edgeOffset + i - 1].component = component; + } else { + throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]); + } + } + + if (direction) { + edges[edgeOffset].setNext(edges[edgeOffset + length - 1]); + edges[edgeOffset].component = component; + } else { + edges[edgeOffset + length - 1].setNext(edges[edgeOffset]); + edges[edgeOffset + length - 1].component = component; + } + + return edges; + } + + /** + * Transforms coordinates in the eastern hemisphere (-180:0) to a (180:360) range + */ + private static void translate(Coordinate[] points) { + for (Coordinate c : points) { + if (c.x < 0) { + c.x += 2*DATELINE; + } + } + } + @Override public int hashCode() { return Objects.hash(shell, holes, orientation); diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java index a7fbfb8e380..10a5070f3f8 100644 ---
a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -69,7 +69,7 @@ public abstract class AbstractShapeBuilderTestCase exte /** * mutate the given shape so the returned shape is different */ - protected abstract SB mutate(SB original) throws IOException; + protected abstract SB createMutation(SB original) throws IOException; /** * Test that creates new shape from a random test shape and checks both for equality @@ -95,10 +95,11 @@ public abstract class AbstractShapeBuilderTestCase exte /** * Test serialization and deserialization of the test shape. */ + @SuppressWarnings("unchecked") public void testSerialization() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { SB testShape = createTestShapeBuilder(); - SB deserializedShape = copyShape(testShape); + SB deserializedShape = (SB) copyShape(testShape); assertEquals(testShape, deserializedShape); assertEquals(testShape.hashCode(), deserializedShape.hashCode()); assertNotSame(testShape, deserializedShape); @@ -108,6 +109,7 @@ public abstract class AbstractShapeBuilderTestCase exte /** * Test equality and hashCode properties */ + @SuppressWarnings("unchecked") public void testEqualsAndHashcode() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { SB firstShape = createTestShapeBuilder(); @@ -116,15 +118,15 @@ public abstract class AbstractShapeBuilderTestCase exte assertTrue("shape is not equal to self", firstShape.equals(firstShape)); assertThat("same shape's hashcode returns different values if called multiple times", firstShape.hashCode(), equalTo(firstShape.hashCode())); - assertThat("different shapes should not be equal", mutate(firstShape), not(equalTo(firstShape))); + assertThat("different shapes should not be equal", createMutation(firstShape), not(equalTo(firstShape))); - SB secondShape = copyShape(firstShape); + 
SB secondShape = (SB) copyShape(firstShape); assertTrue("shape is not equal to self", secondShape.equals(secondShape)); assertTrue("shape is not equal to its copy", firstShape.equals(secondShape)); assertTrue("equals is not symmetric", secondShape.equals(firstShape)); assertThat("shape copy's hashcode is different from original hashcode", secondShape.hashCode(), equalTo(firstShape.hashCode())); - SB thirdShape = copyShape(secondShape); + SB thirdShape = (SB) copyShape(secondShape); assertTrue("shape is not equal to self", thirdShape.equals(thirdShape)); assertTrue("shape is not equal to its copy", secondShape.equals(thirdShape)); assertThat("shape copy's hashcode is different from original hashcode", secondShape.hashCode(), equalTo(thirdShape.hashCode())); @@ -135,14 +137,12 @@ public abstract class AbstractShapeBuilderTestCase exte } } - protected SB copyShape(SB original) throws IOException { + static ShapeBuilder copyShape(ShapeBuilder original) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { original.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { ShapeBuilder prototype = (ShapeBuilder) namedWriteableRegistry.getPrototype(ShapeBuilder.class, original.getWriteableName()); - @SuppressWarnings("unchecked") - SB copy = (SB) prototype.readFrom(in); - return copy; + return prototype.readFrom(in); } } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java index 17ed5e19876..1db9da428ad 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java @@ -27,20 +27,18 @@ import java.io.IOException; public class CircleBuilderTests extends AbstractShapeBuilderTestCase { - final static CircleBuilderTests PROTOTYPE = new 
CircleBuilderTests(); - @Override protected CircleBuilder createTestShapeBuilder() { - double centerX = randomDoubleBetween(-180, 180, false); - double centerY = randomDoubleBetween(-90, 90, false); - return new CircleBuilder() - .center(new Coordinate(centerX, centerY)) - .radius(randomDoubleBetween(0.1, 10.0, false), randomFrom(DistanceUnit.values())); + return createRandomShape(); } @Override - protected CircleBuilder mutate(CircleBuilder original) throws IOException { - CircleBuilder mutation = copyShape(original); + protected CircleBuilder createMutation(CircleBuilder original) throws IOException { + return mutate(original); + } + + static CircleBuilder mutate(CircleBuilder original) throws IOException { + CircleBuilder mutation = (CircleBuilder) copyShape(original); double radius = original.radius(); DistanceUnit unit = original.unit(); @@ -57,4 +55,12 @@ public class CircleBuilderTests extends AbstractShapeBuilderTestCase { - static final EnvelopeBuilderTests PROTOTYPE = new EnvelopeBuilderTests(); - @Override protected EnvelopeBuilder createTestShapeBuilder() { - EnvelopeBuilder envelope = new EnvelopeBuilder(randomFrom(Orientation.values())); - Rectangle box = RandomShapeGenerator.xRandomRectangle(getRandom(), RandomShapeGenerator.xRandomPoint(getRandom())); - envelope.topLeft(box.getMinX(), box.getMaxY()) - .bottomRight(box.getMaxX(), box.getMinY()); - return envelope; + return createRandomShape(); } @Override - protected EnvelopeBuilder mutate(EnvelopeBuilder original) throws IOException { - EnvelopeBuilder mutation = copyShape(original); + protected EnvelopeBuilder createMutation(EnvelopeBuilder original) throws IOException { + return mutate(original); + } + + static EnvelopeBuilder mutate(EnvelopeBuilder original) throws IOException { + EnvelopeBuilder mutation = (EnvelopeBuilder) copyShape(original); if (randomBoolean()) { // toggle orientation mutation.orientation = (original.orientation == Orientation.LEFT ? 
Orientation.RIGHT : Orientation.LEFT); @@ -65,4 +63,12 @@ public class EnvelopeBuilderTests extends AbstractShapeBuilderTestCase { - static final LineStringBuilderTests PROTOTYPE = new LineStringBuilderTests(); - @Override protected LineStringBuilder createTestShapeBuilder() { - LineStringBuilder lsb = (LineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.LINESTRING); - if (randomBoolean()) { - lsb.close(); - } - return lsb; + return createRandomShape(); } @Override - protected LineStringBuilder mutate(LineStringBuilder original) throws IOException { - LineStringBuilder mutation = copyShape(original); + protected LineStringBuilder createMutation(LineStringBuilder original) throws IOException { + return mutate(original); + } + + static LineStringBuilder mutate(LineStringBuilder original) throws IOException { + LineStringBuilder mutation = (LineStringBuilder) copyShape(original); Coordinate[] coordinates = original.coordinates(false); Coordinate coordinate = randomFrom(coordinates); if (randomBoolean()) { @@ -59,4 +57,12 @@ public class LineStringBuilderTests extends AbstractShapeBuilderTestCase { - static final MultiLineStringBuilderTests PROTOTYPE = new MultiLineStringBuilderTests(); - @Override protected MultiLineStringBuilder createTestShapeBuilder() { - return (MultiLineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.MULTILINESTRING); + return createRandomShape(); } @Override - protected MultiLineStringBuilder mutate(MultiLineStringBuilder original) throws IOException { - MultiLineStringBuilder mutation = copyShape(original); + protected MultiLineStringBuilder createMutation(MultiLineStringBuilder original) throws IOException { + return mutate(original); + } + + static MultiLineStringBuilder mutate(MultiLineStringBuilder original) throws IOException { + MultiLineStringBuilder mutation = (MultiLineStringBuilder) copyShape(original); Coordinate[][] coordinates = mutation.coordinates(); int lineToChange = 
randomInt(coordinates.length - 1); for (int i = 0; i < coordinates.length; i++) { @@ -61,4 +63,8 @@ public class MultiLineStringBuilderTests extends AbstractShapeBuilderTestCase { - static final MultiPointBuilderTests PROTOTYPE = new MultiPointBuilderTests(); - @Override protected MultiPointBuilder createTestShapeBuilder() { - return (MultiPointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.MULTIPOINT); + return createRandomShape(); } @Override - protected MultiPointBuilder mutate(MultiPointBuilder original) throws IOException { - MultiPointBuilder mutation = copyShape(original); + protected MultiPointBuilder createMutation(MultiPointBuilder original) throws IOException { + return mutate(original); + } + + static MultiPointBuilder mutate(MultiPointBuilder original) throws IOException { + MultiPointBuilder mutation = (MultiPointBuilder) copyShape(original); Coordinate[] coordinates = original.coordinates(false); Coordinate coordinate = randomFrom(coordinates); if (randomBoolean()) { @@ -55,4 +57,8 @@ public class MultiPointBuilderTests extends AbstractShapeBuilderTestCase { - static final MultiPolygonBuilderTests PROTOTYPE = new MultiPolygonBuilderTests(); - @Override protected MultiPolygonBuilder createTestShapeBuilder() { - MultiPolygonBuilder mpb = new MultiPolygonBuilder(randomFrom(Orientation.values())); - int polys = randomIntBetween(1, 10); - for (int i = 0; i < polys; i++) { - PolygonBuilder pgb = (PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON); - pgb.orientation = mpb.orientation; - // NORELEASE translated might have been changed by createShape, but won't survive xContent->Parse roundtrip - pgb.shell().translated(false); - mpb.polygon(pgb); - } - return mpb; + return createRandomShape(); } @Override - protected MultiPolygonBuilder mutate(MultiPolygonBuilder original) throws IOException { - MultiPolygonBuilder mutation = copyShape(original); + protected MultiPolygonBuilder createMutation(MultiPolygonBuilder 
original) throws IOException { + return mutate(original); + } + + static MultiPolygonBuilder mutate(MultiPolygonBuilder original) throws IOException { + MultiPolygonBuilder mutation = (MultiPolygonBuilder) copyShape(original); if (randomBoolean()) { // toggle orientation mutation.orientation = (original.orientation == Orientation.LEFT ? Orientation.RIGHT : Orientation.LEFT); @@ -55,4 +48,15 @@ public class MultiPolygonBuilderTests extends AbstractShapeBuilderTestCase { +import java.io.IOException; - final static PointBuilderTests PROTOTYPE = new PointBuilderTests(); +public class PointBuilderTests extends AbstractShapeBuilderTestCase { @Override protected PointBuilder createTestShapeBuilder() { - return (PointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POINT); + return createRandomShape(); } @Override - protected PointBuilder mutate(PointBuilder original) { + protected PointBuilder createMutation(PointBuilder original) throws IOException { + return mutate(original); + } + + static PointBuilder mutate(PointBuilder original) { return new PointBuilder().coordinate(new Coordinate(original.longitude() / 2, original.latitude() / 2)); } + + static PointBuilder createRandomShape() { + return (PointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POINT); + } + + } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java index b7cbf85a601..69457419727 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java @@ -29,20 +29,18 @@ import java.io.IOException; public class PolygonBuilderTests extends AbstractShapeBuilderTestCase { - static final PolygonBuilderTests PROTOTYPE = new PolygonBuilderTests(); - @Override protected PolygonBuilder createTestShapeBuilder() { - PolygonBuilder pgb = 
(PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON); - pgb.orientation = randomFrom(Orientation.values()); - // NORELEASE translated might have been changed by createShape, but won't survive xContent->Parse roundtrip - pgb.shell().translated(false); - return pgb; + return createRandomShape(); } @Override - protected PolygonBuilder mutate(PolygonBuilder original) throws IOException { - PolygonBuilder mutation = copyShape(original); + protected PolygonBuilder createMutation(PolygonBuilder original) throws IOException { + return mutate(original); + } + + static PolygonBuilder mutate(PolygonBuilder original) throws IOException { + PolygonBuilder mutation = (PolygonBuilder) copyShape(original); return mutatePolygonBuilder(mutation); } @@ -75,4 +73,10 @@ public class PolygonBuilderTests extends AbstractShapeBuilderTestCase Date: Fri, 4 Dec 2015 15:59:28 +0100 Subject: [PATCH 137/167] Only Polygon and MultiPolygon need `orientation` property Removing the `orientation` field from ShapeBuilder, only leaving it in PolygonBuilder and MultiPolygonBuilder which are the only places where it is actually used and parsed at the moment. 
--- .../common/geo/builders/EnvelopeBuilder.java | 30 +++++------- .../builders/GeometryCollectionBuilder.java | 17 ++----- .../geo/builders/MultiPolygonBuilder.java | 21 ++++++-- .../common/geo/builders/PolygonBuilder.java | 10 +++- .../common/geo/builders/ShapeBuilder.java | 49 ++++++++----------- .../common/geo/builders/ShapeBuilders.java | 18 ------- .../index/mapper/geo/GeoShapeFieldMapper.java | 2 +- .../geo/builders/EnvelopeBuilderTests.java | 36 ++++++-------- .../GeometryCollectionBuilderTests.java | 8 --- .../builders/MultiPolygonBuilderTests.java | 20 +++++--- .../geo/builders/PolygonBuilderTests.java | 20 ++++++-- .../query/GeoShapeQueryBuilderTests.java | 1 - 12 files changed, 109 insertions(+), 123 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 82fd3275116..afcf8990513 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Locale; import java.util.Objects; public class EnvelopeBuilder extends ShapeBuilder { @@ -35,16 +34,8 @@ public class EnvelopeBuilder extends ShapeBuilder { public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(); - protected Coordinate topLeft; - protected Coordinate bottomRight; - - public EnvelopeBuilder() { - this(Orientation.RIGHT); - } - - public EnvelopeBuilder(Orientation orientation) { - super(orientation); - } + private Coordinate topLeft; + private Coordinate bottomRight; public EnvelopeBuilder topLeft(Coordinate topLeft) { this.topLeft = topLeft; @@ -55,6 +46,10 @@ public class EnvelopeBuilder extends ShapeBuilder { 
return topLeft(coordinate(longitude, latitude)); } + public Coordinate topLeft() { + return this.topLeft; + } + public EnvelopeBuilder bottomRight(Coordinate bottomRight) { this.bottomRight = bottomRight; return this; @@ -64,11 +59,14 @@ public class EnvelopeBuilder extends ShapeBuilder { return bottomRight(coordinate(longitude, latitude)); } + public Coordinate bottomRight() { + return this.bottomRight; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(FIELD_TYPE, TYPE.shapeName()); - builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT)); builder.startArray(FIELD_COORDINATES); toXContent(builder, topLeft); toXContent(builder, bottomRight); @@ -88,7 +86,7 @@ public class EnvelopeBuilder extends ShapeBuilder { @Override public int hashCode() { - return Objects.hash(orientation, topLeft, bottomRight); + return Objects.hash(topLeft, bottomRight); } @Override @@ -100,21 +98,19 @@ public class EnvelopeBuilder extends ShapeBuilder { return false; } EnvelopeBuilder other = (EnvelopeBuilder) obj; - return Objects.equals(orientation, other.orientation) && - Objects.equals(topLeft, other.topLeft) && + return Objects.equals(topLeft, other.topLeft) && Objects.equals(bottomRight, other.bottomRight); } @Override public void writeTo(StreamOutput out) throws IOException { - orientation.writeTo(out); writeCoordinateTo(topLeft, out); writeCoordinateTo(bottomRight, out); } @Override public EnvelopeBuilder readFrom(StreamInput in) throws IOException { - return new EnvelopeBuilder(Orientation.readFrom(in)) + return new EnvelopeBuilder() .topLeft(readCoordinateFrom(in)) .bottomRight(readCoordinateFrom(in)); } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java index e4db15998fd..067cd014c0f 100644 --- 
a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Locale; import java.util.Objects; public class GeometryCollectionBuilder extends ShapeBuilder { @@ -41,14 +40,6 @@ public class GeometryCollectionBuilder extends ShapeBuilder { protected final ArrayList shapes = new ArrayList<>(); - public GeometryCollectionBuilder() { - this(Orientation.RIGHT); - } - - public GeometryCollectionBuilder(Orientation orientation) { - super(orientation); - } - public GeometryCollectionBuilder shape(ShapeBuilder shape) { this.shapes.add(shape); return this; @@ -110,7 +101,6 @@ public class GeometryCollectionBuilder extends ShapeBuilder { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(FIELD_TYPE, TYPE.shapeName()); - builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT)); builder.startArray(FIELD_GEOMETRIES); for (ShapeBuilder shape : shapes) { shape.toXContent(builder, params); @@ -142,7 +132,7 @@ public class GeometryCollectionBuilder extends ShapeBuilder { @Override public int hashCode() { - return Objects.hash(orientation, shapes); + return Objects.hash(shapes); } @Override @@ -154,12 +144,11 @@ public class GeometryCollectionBuilder extends ShapeBuilder { return false; } GeometryCollectionBuilder other = (GeometryCollectionBuilder) obj; - return Objects.equals(orientation, other.orientation) && Objects.equals(shapes, other.shapes); + return Objects.equals(shapes, other.shapes); } @Override public void writeTo(StreamOutput out) throws IOException { - orientation.writeTo(out); out.writeVInt(shapes.size()); for (ShapeBuilder shape : shapes) { out.writeShape(shape); @@ -168,7 
+157,7 @@ public class GeometryCollectionBuilder extends ShapeBuilder { @Override public GeometryCollectionBuilder readFrom(StreamInput in) throws IOException { - GeometryCollectionBuilder geometryCollectionBuilder = new GeometryCollectionBuilder(Orientation.readFrom(in)); + GeometryCollectionBuilder geometryCollectionBuilder = new GeometryCollectionBuilder(); int shapes = in.readVInt(); for (int i = 0; i < shapes; i++) { geometryCollectionBuilder.shape(in.readShape()); diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index a82c0fad9bd..2f9d595c9cb 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -40,16 +40,31 @@ public class MultiPolygonBuilder extends ShapeBuilder { private final ArrayList polygons = new ArrayList<>(); + private Orientation orientation = Orientation.RIGHT; + public MultiPolygonBuilder() { this(Orientation.RIGHT); } public MultiPolygonBuilder(Orientation orientation) { - super(orientation); + this.orientation = orientation; } + public Orientation orientation() { + return this.orientation; + } + + /** + * Add a shallow copy of the polygon to the multipolygon. This will apply the orientation of the + * {@link MultiPolygonBuilder} to the polygon if polygon has different orientation. 
+ */ public MultiPolygonBuilder polygon(PolygonBuilder polygon) { - this.polygons.add(polygon); + PolygonBuilder pb = new PolygonBuilder(this.orientation); + pb.points(polygon.shell().coordinates(false)); + for (LineStringBuilder hole : polygon.holes()) { + pb.hole(hole); + } + this.polygons.add(pb); return this; } @@ -118,7 +133,7 @@ public class MultiPolygonBuilder extends ShapeBuilder { } MultiPolygonBuilder other = (MultiPolygonBuilder) obj; return Objects.equals(polygons, other.polygons) && - Objects.equals(orientation, other.orientation); + Objects.equals(orientation, other.orientation); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index fefbcb348ca..03ff6a6b892 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -57,6 +57,8 @@ public class PolygonBuilder extends ShapeBuilder { private static final Coordinate[][] EMPTY = new Coordinate[0][]; + private Orientation orientation = Orientation.RIGHT; + // line string defining the shell of the polygon private LineStringBuilder shell; @@ -64,7 +66,7 @@ public class PolygonBuilder extends ShapeBuilder { private final ArrayList holes = new ArrayList<>(); public PolygonBuilder() { - this(new ArrayList(), Orientation.RIGHT); + this(Orientation.RIGHT); } public PolygonBuilder(Orientation orientation) { @@ -72,10 +74,14 @@ public class PolygonBuilder extends ShapeBuilder { } public PolygonBuilder(ArrayList points, Orientation orientation) { - super(orientation); + this.orientation = orientation; this.shell = new LineStringBuilder().points(points); } + public Orientation orientation() { + return this.orientation; + } + public PolygonBuilder point(double longitude, double latitude) { shell.point(longitude, latitude); return this; diff --git 
a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index 1ab568d4be1..fcd8177ac6c 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -77,16 +77,10 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri /** @see com.spatial4j.core.shape.jts.JtsGeometry#index() */ protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it. - protected Orientation orientation = Orientation.RIGHT; - protected ShapeBuilder() { } - protected ShapeBuilder(Orientation orientation) { - this.orientation = orientation; - } - protected static Coordinate coordinate(double longitude, double latitude) { return new Coordinate(longitude, latitude); } @@ -186,22 +180,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri return new Coordinate(in.readDouble(), in.readDouble()); } - public static Orientation orientationFromString(String orientation) { - orientation = orientation.toLowerCase(Locale.ROOT); - switch (orientation) { - case "right": - case "counterclockwise": - case "ccw": - return Orientation.RIGHT; - case "left": - case "clockwise": - case "cw": - return Orientation.LEFT; - default: - throw new IllegalArgumentException("Unknown orientation [" + orientation + "]"); - } - } - protected static Coordinate shift(Coordinate coordinate, double dateline) { if (dateline == 0) { return coordinate; @@ -416,6 +394,22 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri public static Orientation readFrom (StreamInput in) throws IOException { return in.readBoolean() ? 
Orientation.RIGHT : Orientation.LEFT; } + + public static Orientation fromString(String orientation) { + orientation = orientation.toLowerCase(Locale.ROOT); + switch (orientation) { + case "right": + case "counterclockwise": + case "ccw": + return Orientation.RIGHT; + case "left": + case "clockwise": + case "cw": + return Orientation.LEFT; + default: + throw new IllegalArgumentException("Unknown orientation [" + orientation + "]"); + } + } } public static final String FIELD_TYPE = "type"; @@ -506,7 +500,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri radius = Distance.parseDistance(parser.text()); } else if (FIELD_ORIENTATION.equals(fieldName)) { parser.nextToken(); - requestedOrientation = orientationFromString(parser.text()); + requestedOrientation = Orientation.fromString(parser.text()); } else { parser.nextToken(); parser.skipChildren(); @@ -532,7 +526,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri case POLYGON: return parsePolygon(node, requestedOrientation, coerce); case MULTIPOLYGON: return parseMultiPolygon(node, requestedOrientation, coerce); case CIRCLE: return parseCircle(node, radius); - case ENVELOPE: return parseEnvelope(node, requestedOrientation); + case ENVELOPE: return parseEnvelope(node); case GEOMETRYCOLLECTION: return geometryCollections; default: throw new ElasticsearchParseException("shape type [{}] not included", shapeType); @@ -558,7 +552,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri return ShapeBuilders.newCircleBuilder().center(coordinates.coordinate).radius(radius); } - protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates, final Orientation orientation) { + protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates) { // validate the coordinate array for envelope type if (coordinates.children.size() != 2) { throw new ElasticsearchParseException("invalid number of points [{}] provided for 
" + @@ -572,7 +566,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri uL = new Coordinate(Math.min(uL.x, lR.x), Math.max(uL.y, lR.y)); lR = new Coordinate(Math.max(uLtmp.x, lR.x), Math.min(uLtmp.y, lR.y)); } - return ShapeBuilders.newEnvelope(orientation).topLeft(uL).bottomRight(lR); + return ShapeBuilders.newEnvelope().topLeft(uL).bottomRight(lR); } protected static void validateMultiPointNode(CoordinateNode coordinates) { @@ -692,8 +686,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri } XContentParser.Token token = parser.nextToken(); - GeometryCollectionBuilder geometryCollection = ShapeBuilders.newGeometryCollection( (mapper == null) ? Orientation.RIGHT : mapper - .fieldType().orientation()); + GeometryCollectionBuilder geometryCollection = ShapeBuilders.newGeometryCollection(); while (token != XContentParser.Token.END_ARRAY) { ShapeBuilder shapeBuilder = GeoShapeType.parse(parser); geometryCollection.shape(shapeBuilder); diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java index e294a9d6ef7..61d7a9cd07e 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java @@ -110,15 +110,6 @@ public class ShapeBuilders { return new GeometryCollectionBuilder(); } - /** - * Create a new GeometryCollection - * - * @return a new {@link GeometryCollectionBuilder} - */ - public static GeometryCollectionBuilder newGeometryCollection(ShapeBuilder.Orientation orientation) { - return new GeometryCollectionBuilder(orientation); - } - /** * create a new Circle * @@ -136,13 +127,4 @@ public class ShapeBuilders { public static EnvelopeBuilder newEnvelope() { return new EnvelopeBuilder(); } - - /** - * create a new rectangle - * - * @return a new {@link EnvelopeBuilder} - */ - public static 
EnvelopeBuilder newEnvelope(ShapeBuilder.Orientation orientation) { - return new EnvelopeBuilder(orientation); - } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index a99517f4003..1ba49e64d80 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -184,7 +184,7 @@ public class GeoShapeFieldMapper extends FieldMapper { builder.fieldType().setDistanceErrorPct(Double.parseDouble(fieldNode.toString())); iterator.remove(); } else if (Names.ORIENTATION.equals(fieldName)) { - builder.fieldType().setOrientation(ShapeBuilder.orientationFromString(fieldNode.toString())); + builder.fieldType().setOrientation(ShapeBuilder.Orientation.fromString(fieldNode.toString())); iterator.remove(); } else if (Names.STRATEGY.equals(fieldName)) { builder.fieldType().setStrategyName(fieldNode.toString()); diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java index 1c1e37e81aa..2015f2b0bc6 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.common.geo.builders; import com.spatial4j.core.shape.Rectangle; import com.vividsolutions.jts.geom.Coordinate; -import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation; import org.elasticsearch.test.geo.RandomShapeGenerator; import java.io.IOException; @@ -41,31 +40,26 @@ public class EnvelopeBuilderTests extends AbstractShapeBuilderTestCase 0) { int shapePosition = randomIntBetween(0, mutation.shapes.size() - 1); ShapeBuilder shapeToChange = mutation.shapes.get(shapePosition); @@ -109,7 +102,6 
@@ public class GeometryCollectionBuilderTests extends AbstractShapeBuilderTestCase } else { mutation.shape(RandomShapeGenerator.createShape(getRandom())); } -// } return mutation; } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java index fb41a2dffbb..702114a2cb8 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java @@ -38,23 +38,29 @@ public class MultiPolygonBuilderTests extends AbstractShapeBuilderTestCase 0) { + int polyToChange = randomInt(mutation.polygons().size() - 1); + mutation.polygons().set(polyToChange, PolygonBuilderTests.mutatePolygonBuilder(mutation.polygons().get(polyToChange))); + } else { + mutation.polygon((PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON)); + } } return mutation; } static MultiPolygonBuilder createRandomShape() { MultiPolygonBuilder mpb = new MultiPolygonBuilder(randomFrom(Orientation.values())); - int polys = randomIntBetween(1, 10); + int polys = randomIntBetween(0, 10); for (int i = 0; i < polys; i++) { PolygonBuilder pgb = (PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON); - pgb.orientation = mpb.orientation; mpb.polygon(pgb); } return mpb; diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java index 69457419727..ad8b3b817fe 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java @@ -46,8 +46,7 @@ public class PolygonBuilderTests extends AbstractShapeBuilderTestCase Date: Tue, 15 Dec 2015 09:46:10 +0100 Subject: [PATCH 138/167] Adding 
integration test checking that empty geometry collection returns zero docs --- .../geo/builders/AbstractShapeBuilderTestCase.java | 10 ++++++++-- .../elasticsearch/search/geo/GeoShapeQueryTests.java | 12 +++++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java index 10a5070f3f8..279e31aadd4 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -23,14 +23,20 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.xcontent.*; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; import java.io.IOException; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; public abstract class AbstractShapeBuilderTestCase extends ESTestCase { diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index 847e03e5c44..c0cc17fc43d 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ 
b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.geo; import com.spatial4j.core.shape.Rectangle; + import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.geo.ShapeRelation; @@ -47,7 +48,10 @@ import static org.elasticsearch.test.geo.RandomShapeGenerator.xRandomPoint; import static org.elasticsearch.test.geo.RandomShapeGenerator.xRandomRectangle; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.nullValue; public class GeoShapeQueryTests extends ESSingleNodeTestCase { public void testNullShape() throws Exception { @@ -396,6 +400,12 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase { .setPostFilter(filter).get(); assertSearchResponse(result); assertHitCount(result, 1); + // no shape + filter = QueryBuilders.geoShapeQuery("location", ShapeBuilders.newGeometryCollection()); + result = client().prepareSearch("test").setTypes("type").setQuery(QueryBuilders.matchAllQuery()) + .setPostFilter(filter).get(); + assertSearchResponse(result); + assertHitCount(result, 0); } public void testPointsOnly() throws Exception { From 8a02295b9a146abaf5a632fd9718aaffeb7b2f01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 17 Dec 2015 17:39:13 +0100 Subject: [PATCH 139/167] Make sure both JTS and Spatial4J are present before registering shape builders, query or mapper --- .../geo/builders/ShapeBuilderRegistry.java | 21 ++--- .../common/io/stream/StreamInput.java | 2 - .../elasticsearch/indices/IndicesModule.java | 77 +++++++++++++++++-- 3 
files changed, 84 insertions(+), 16 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java index a900b6a9ee5..c66e969aa3a 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.geo.builders; +import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -29,14 +30,16 @@ public class ShapeBuilderRegistry { @Inject public ShapeBuilderRegistry(NamedWriteableRegistry namedWriteableRegistry) { - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE); - namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE); + if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, 
EnvelopeBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE); + } } } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index f8072804225..ffcb4201f4d 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -33,9 +33,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.text.Text; import org.elasticsearch.common.geo.builders.ShapeBuilder; -import org.elasticsearch.common.text.StringAndBytesText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index 3faeb6fb3df..61210bb0413 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -28,13 +28,80 @@ import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.Mapper; import 
org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.mapper.core.*; +import org.elasticsearch.index.mapper.core.BinaryFieldMapper; +import org.elasticsearch.index.mapper.core.BooleanFieldMapper; +import org.elasticsearch.index.mapper.core.ByteFieldMapper; +import org.elasticsearch.index.mapper.core.CompletionFieldMapper; +import org.elasticsearch.index.mapper.core.DateFieldMapper; +import org.elasticsearch.index.mapper.core.DoubleFieldMapper; +import org.elasticsearch.index.mapper.core.FloatFieldMapper; +import org.elasticsearch.index.mapper.core.IntegerFieldMapper; +import org.elasticsearch.index.mapper.core.LongFieldMapper; +import org.elasticsearch.index.mapper.core.ShortFieldMapper; +import org.elasticsearch.index.mapper.core.StringFieldMapper; +import org.elasticsearch.index.mapper.core.TokenCountFieldMapper; +import org.elasticsearch.index.mapper.core.TypeParsers; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper; -import org.elasticsearch.index.mapper.internal.*; +import org.elasticsearch.index.mapper.internal.AllFieldMapper; +import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.internal.IdFieldMapper; +import org.elasticsearch.index.mapper.internal.IndexFieldMapper; +import org.elasticsearch.index.mapper.internal.ParentFieldMapper; +import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; +import org.elasticsearch.index.mapper.internal.SourceFieldMapper; +import org.elasticsearch.index.mapper.internal.TTLFieldMapper; +import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.mapper.internal.VersionFieldMapper; import org.elasticsearch.index.mapper.ip.IpFieldMapper; import 
org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.query.*; +import org.elasticsearch.index.query.BoolQueryParser; +import org.elasticsearch.index.query.BoostingQueryParser; +import org.elasticsearch.index.query.CommonTermsQueryParser; +import org.elasticsearch.index.query.ConstantScoreQueryParser; +import org.elasticsearch.index.query.DisMaxQueryParser; +import org.elasticsearch.index.query.ExistsQueryParser; +import org.elasticsearch.index.query.FieldMaskingSpanQueryParser; +import org.elasticsearch.index.query.FuzzyQueryParser; +import org.elasticsearch.index.query.GeoBoundingBoxQueryParser; +import org.elasticsearch.index.query.GeoDistanceQueryParser; +import org.elasticsearch.index.query.GeoDistanceRangeQueryParser; +import org.elasticsearch.index.query.GeoPolygonQueryParser; +import org.elasticsearch.index.query.GeoShapeQueryParser; +import org.elasticsearch.index.query.GeohashCellQuery; +import org.elasticsearch.index.query.HasChildQueryParser; +import org.elasticsearch.index.query.HasParentQueryParser; +import org.elasticsearch.index.query.IdsQueryParser; +import org.elasticsearch.index.query.IndicesQueryParser; +import org.elasticsearch.index.query.MatchAllQueryParser; +import org.elasticsearch.index.query.MatchNoneQueryParser; +import org.elasticsearch.index.query.MatchQueryParser; +import org.elasticsearch.index.query.MoreLikeThisQueryParser; +import org.elasticsearch.index.query.MultiMatchQueryParser; +import org.elasticsearch.index.query.NestedQueryParser; +import org.elasticsearch.index.query.PrefixQueryParser; +import org.elasticsearch.index.query.QueryParser; +import org.elasticsearch.index.query.QueryStringQueryParser; +import org.elasticsearch.index.query.RangeQueryParser; +import org.elasticsearch.index.query.RegexpQueryParser; +import org.elasticsearch.index.query.ScriptQueryParser; +import org.elasticsearch.index.query.SimpleQueryStringParser; +import org.elasticsearch.index.query.SpanContainingQueryParser; 
+import org.elasticsearch.index.query.SpanFirstQueryParser; +import org.elasticsearch.index.query.SpanMultiTermQueryParser; +import org.elasticsearch.index.query.SpanNearQueryParser; +import org.elasticsearch.index.query.SpanNotQueryParser; +import org.elasticsearch.index.query.SpanOrQueryParser; +import org.elasticsearch.index.query.SpanTermQueryParser; +import org.elasticsearch.index.query.SpanWithinQueryParser; +import org.elasticsearch.index.query.TemplateQueryParser; +import org.elasticsearch.index.query.TermQueryParser; +import org.elasticsearch.index.query.TermsQueryParser; +import org.elasticsearch.index.query.TypeQueryParser; +import org.elasticsearch.index.query.WildcardQueryParser; +import org.elasticsearch.index.query.WrapperQueryParser; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryParser; import org.elasticsearch.index.termvectors.TermVectorsService; import org.elasticsearch.indices.cache.query.IndicesQueryCache; @@ -123,7 +190,7 @@ public class IndicesModule extends AbstractModule { registerQueryParser(ExistsQueryParser.class); registerQueryParser(MatchNoneQueryParser.class); - if (ShapesAvailability.JTS_AVAILABLE) { + if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { registerQueryParser(GeoShapeQueryParser.class); } } @@ -147,7 +214,7 @@ public class IndicesModule extends AbstractModule { registerMapper(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser()); registerMapper(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser()); - if (ShapesAvailability.JTS_AVAILABLE) { + if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { registerMapper(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser()); } } From c31117f1e8d0b4daa05bc528b3fc30871f85302a Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 17 Dec 2015 18:38:46 +0100 Subject: [PATCH 140/167] Added BWC indices for 2.0.2 and 2.1.1 and versions 
2.0.3-SNAPSHOT and 2.1.2-SNAPSHOT --- .../main/java/org/elasticsearch/Version.java | 12 ++++++++++-- .../test/resources/indices/bwc/index-2.0.2.zip | Bin 0 -> 83456 bytes .../test/resources/indices/bwc/index-2.1.1.zip | Bin 0 -> 71994 bytes .../test/resources/indices/bwc/repo-2.0.2.zip | Bin 0 -> 81369 bytes .../test/resources/indices/bwc/repo-2.1.1.zip | Bin 0 -> 70133 bytes 5 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 core/src/test/resources/indices/bwc/index-2.0.2.zip create mode 100644 core/src/test/resources/indices/bwc/index-2.1.1.zip create mode 100644 core/src/test/resources/indices/bwc/repo-2.0.2.zip create mode 100644 core/src/test/resources/indices/bwc/repo-2.1.1.zip diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index a5e2e38ca26..b8ba0a411a9 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -268,11 +268,15 @@ public class Version { public static final int V_2_0_1_ID = 2000199; public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1); public static final int V_2_0_2_ID = 2000299; - public static final Version V_2_0_2 = new Version(V_2_0_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1); + public static final Version V_2_0_2 = new Version(V_2_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1); + public static final int V_2_0_3_ID = 2000399; + public static final Version V_2_0_3 = new Version(V_2_0_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1); public static final int V_2_1_0_ID = 2010099; public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1); public static final int V_2_1_1_ID = 2010199; - public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1); + public static final Version V_2_1_1 = new 
Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1); + public static final int V_2_1_2_ID = 2010299; + public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1); public static final int V_2_2_0_ID = 2020099; public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0); public static final int V_3_0_0_ID = 3000099; @@ -293,10 +297,14 @@ public class Version { return V_3_0_0; case V_2_2_0_ID: return V_2_2_0; + case V_2_1_2_ID: + return V_2_1_2; case V_2_1_1_ID: return V_2_1_1; case V_2_1_0_ID: return V_2_1_0; + case V_2_0_3_ID: + return V_2_0_3; case V_2_0_2_ID: return V_2_0_2; case V_2_0_1_ID: diff --git a/core/src/test/resources/indices/bwc/index-2.0.2.zip b/core/src/test/resources/indices/bwc/index-2.0.2.zip new file mode 100644 index 0000000000000000000000000000000000000000..2f77405a8319782becdcd2426f70ce8c897949e4 GIT binary patch literal 83456 zcmbrkV~{4{mMvU1y1Hz2mu(x}W!tuGSC?(uwr$&b%eLRDx_!=k6W_$SF?Z(9&4`_m zKQh;fy`Q~y?gw&`;1F;i|G1iBUuxR6PHhvl~1J zD8wTu2nYn)-xuZn)6dcW{c{6XLtS$lBV#vRCOQT>rvD)v?tcbnV{2sW_}>I={TCPU z|IeTd{|0OIU%B4@FR(xUD_C7eCw(X5e?$LAQ2&#_{v*IJApbxg<}j+(`y2DtzefMx zL^rjvHPE-BWubF)`sWD#Cq&wDvXWqeXua%00o?-6GCx}j0Y#E|7|Gg7k>PsSt~s1d zU#V$`Kif~KN}Wz5gXExNvUtSRoKEM9Gzs%eU~pF+=e4%?pG~&krzV#>+P>!@Fv^)& z5=TmAoKK)^*ZJk8`eOk;e|`(c_uO2Pj$tB7Kh`hsDlAf`QyxcGNK2r(Aj}aii=}U* zWC*!r9w+SIqO$L#l#es#;xedDfOA%75kTa&{KFV8U5aZVEXS!LHkiF^{bmi{cC~nv!LB|K%<;^6<|YDt6&OWD zhE*Z-hmbH0OP;eH{RixJxq)K)*S4`Ma$=vp6aAAC!FXzn=*SFxkv`*XvX?Xl^fSt< zLBd%6JUmU~O*J>>>bzwXAEd~`IzN?(FhJ{}IzVxy4#du!q#Z&l)ZpkF0w{LXmQOH$ zftOb{-zVt*uaJMuzMucqK>k&Yx#7Pj_kWp%{|e9e--Z9DIBEZbIRCGynExkK|6O(c zt6yCIGqV4vx&MQM|E9TraQ+f3Fw_65K(D_h^8dv7PqouB{JRkU!Tt~Xmr~l$lTA9U zp%t$dKkf&YTHc0+mN>2u5zc&npNLk87U?7a1}U1>7ABfRSbz8Kn59J`E!x7w$Vru&(g4t9cn2DGEW!e3IsMWpyQm{DxXYTzo zasI7}x&2pF>>rdH%}FwW)_?yri69^X|1Xrf2F7$IM*mjU{=w|=zeh8v3py2VsOedI 
z$=#&AolSbCve{Jwrchf=g*mGOR^taTXTHG0p8%+pRb>!E-!N>3 z9z)-cz^J%UvSYpYpdV7TH6I_c^iSvK*Ph$&UOohtW$Dq%CDX52iY4VE*&O+M-k(7q z$iQ{~-=;D9FXoYgoT(cA)&QT5yS|Bj(rhiC z!%TEa3Fxmp;AJ^F( zxb|dkN_S3uRPPe-RVzMV_7nGae4CZS_ZefK8@7rls5Lw`1qjF01D?zQFJKQF4q0gg z>7p^`S$<2ZUH6dq9iHhC_H?8w!v<%HXJ6Z9)Xd@Uep9}_L!;m*vLVgM>DL2>*2ve> zj8=1hNw+ooa9r;M_Ima-9+|x=FGySphbt)CdZP!==(iN{Fe&B_i^psvu!HQ z4ft(2T~U}}{T*n?(z{|K)KNfdGS`F7B%sJ60Kd)Ogo>|VO{UWG6t$E7HS!3$M?UDD z{0MUA;L&V0w|)z*MWyp#LxYzy6d6)}_)wh6RpH8*uq?r_5p!Fnah6jjyg8br(Yc;g z23{j1bj2|c|0diG3mP9ScxUcRu#i3Qv3=9O0Rufj=gvC1fK99jIQjuN?kqhz}u55WuPgtKLWei#QVMT&W zXDRZSmf!1_YbG~(&1%@n0<$x}*LMBsw9W$?bGP2xTw4200r?!MuV_rCSrYo4MMBt) z>RD{7N#kbxao%CS>rK4r*(2~iJ%+0z2|&^(YqvjR?jwc%yWDdRsdZ|}gxNuo zYxES~*~!~0h2&re166r}L-ToG+-W1GCm{NZjJ;i%zdJ$W9T)w!U4@=tf66UQH2;U} z_sMP5D$JaPb$gE@^@Ol#k(`2p0l;Ich#e+U>nM{LwQ*Y#L6zHW-%|CiM1$m4*?y?4 z00c67YM0!##Eu^DUaHxy1@(MD|5+-w?d%^2B}-+)2$XTOcVrH)QtROAeoAdf@>}7(rHP zaPzas0B2fm);(}8DwEwVZ6cMg;m9<`GC%1;1NFS^RHVyamwL{*0UOP&2EGI1VbqZagjDp`mea_4Q!3V^^yp!{11Y%cF zJ0eEQqu{FxTGGvSmFi>JK2An~@$J`bhkMQFKTYCj=2Gt%-ZhYk$qZjwE>;6sv;{|K zRt`ur$3i`BsgNDFSrR#=u6_i21=jV^91dp~*P(Nt>Ak?Lknb%K?8OGyo&*jdE0fM1 z(y{JY6~T0xMT>20HS)tIQx1XS2UK&z`Ts%4E z!SQSQMR!Ouev+lX;-KP>94taLd<|}1;L-KiY_zhX)^GTYdT(J&+fwDH>d^s`EoU{y z3(yc6Fj?4N`4Xy$4}PS%AF|SV*}=N<|eaRAMvGq#P;vV`UVXqR^ukl?QHA_2&Hru z9=z?`YAhzMS8~BV^Kq_MGKT2OS-LgHR?u&c^i-XK`f3K{z1@OdoTuP-On{Z!3)1yY zg2(X1w1iiOPw!;D{VI9+XS280Om#H_HB}Y&sTWFC{OzjxO&d|wc53p+k%aZk&+)GM zc6LD#@Fg#%jMoxJPo))UcuZEu=CO~KtT&(TD6K)P5F!)<$wO0YRb+lTzk5ANE2 zPjV_V9@R;7=}sGNn&awx`t`M_xnb~UhUgE>c0ez`c+#;>a5+7<-27Pkt3e&p*`=9_ zWL!JIdpbwY%x0P?aSGh@MLPlGPit z>lAsYl;(rd&P^vPGlr694U}n?_8O2&MorHB;E}(Q3fqc6W(5BT@DM}b?){Mr%6I-X ztS4A0a(+el(`mdEO^ox<@hg&(UhY5TwP*m(%G5~N>UWL;<#;O zNb!~_Mm9iJTgJuY1}p(u_!~AKmQZ{4p3fZu^k-oNqX(wrz7QTt6_vjk0V=8zJCBkz z`E8j*RWRm#zo|PoKYP3)`YVmqg2c&X7AGAu_3zh(T*U~c|dD8aztJdh^cGNEj^U<}$L zmdXzhS*n@<5(4>1*V%k&^l!B-Y^khE9uH`Q5_x)uubKWiooFI3M_A$9$G7px{p4+G z<$+PEes<4`c*m4{sWugYVDQX+JDM&G9ZgpZi 
z8&Jt1r2%06VF8o{8aX2J^)JkR&+v|zkZyWX8_$k2=?P- zI7EeD?;_7VxT1Xyp(}0wPl^67`jKOIzPU zCs+-8t&V2S?u4=?ns8e3mR#qmV9SBLk3XDjrfiH1cl{%%o4o|~01^{!DXbBo%H8l; zygj(_t|;t2H7`T@#jzDwEw(CKFYxKQG)Mif$P)vjj)9nuE}*=66Y^@Ij(CohBlL7Z zLN)nAf$;9U-BOJMGJ683Ut3T8vZqmCXPtHjV88aTT2z_tEvnRk>p?;G$0o{^S2nx- z@oz+0V?;t^8VADCqx8#ZrDjJdd+fX_O}~kTYMl}Wl=#cdnlmAse5(LrO55v$g7ZQJ zg6QZwpr_=#Yz@fZvON<|a05XzsoSKo@~Nb>_Gz=aa~s4VgdQJgq6oh|MBE*7`~(Li zI}x68i~xS#EPKF397Hbm^0$gNx8TZUV)c79-kjX#fj<^5eZTc{qLPInYkGZpLWPI% z_6s%C^(U0U+6~YbbQeN}c{+FydB1<^pj`JqzB>~lS*+Hsu9)fA`Bueg4;q6D;2@Ph zO>Y*i;0Cd!AtyXm?uW3)q-gG8+P{Dm#c7RxcvWg7u%%lfF%z&IK)cley4My{KIN#S={#c^1d44=6h$q~?41hcCxSE0CM zkvTbEGK|g2>3y%t?xdRo323+KD{h1z_`pFDH$8yq$J@3NU*&|$G)t$MTA68eRJg66 zfzE~hAM5!^{{C;e)o~uN>AL_peL2!xxN+|g(oX~I?lq)&aoi>qrloI}I_#=@%};{M zuKtQF1B}^&k{{o2PH4%Huvm`G1z1wU2ih|UmfYYa?!aCGw3{(=O0gVoIVd*nF8u9x zxU2I4G%$-zA5xhUTNg&c-4|R{FyW5e1#u~$F!~eh0*L3hNA!y({j9I-8QdjUPYH7M zH&_9sT-zi=im}&O&M?umMN?3L8)?LSCr$b8Du2{(L>4KM>C9@fmUsMU<8esESYtd%&tTbm$KFWVMERKB@Us0_;e zdkbRTLzV-pw=YN~xGoLfn8AJRMiKH0oga?p_FjLwBPtp423U+ETP088T8K(ZQF8T?BJg411lEkY>`7aHqKF%T9@rt+D_QcULt`RziEH5 z;P34cxP80(Hg*0@5qjWPn3*&7tl(~{co0w$J1dvn7 z4KcV4sfR!>It5ZU)(7Ch_#GR_^k)BXQU+c?nqu3ucg@Sp`!pSGf*5#)w(YruBbSVn zq)fX86mNzJ8`&xieZZB5gu6j4P7UQfQ%`v{kk!n#I`#lWp(`+Mt_6i7L)Q3i9bc+?({|Q+Cs<_WvozqBJ8xdI)j$tGL|}fU+_Hg;KtMiFkhLA$2&Epg zsUgvww`(7<8?t2ivxLn0Zr2n?f5ZU}*fb2IckZ~QZ6%2&Us-|A9x{7F*d_Wx+2;1+ zZi4VIMOPDZME81${Z{Hr1Rhg`_&i^Tb{yivuK{1#Civ1 z2lwi9i?$Hct9_TLLpR(Cdt5UU06utNtqszu!=Bgbo)ZiO&W=~k#?W2~s<~}Vg>+zy zv$7{D8V63|266Ru&1h5Z6O^JwCf9q|t^hPwQdGeF z5r>|I$p79?(fS2AqrBzDKXHvGb{pRzRl_087|r@|ppI~2W^Bxmqc|6HX(2wA(-d#b z&>uzmHZno`5~RasxtS;|C8;XywPD{-;rG-hOWO+nE!v2c+2zby{FIsDNmI+_dec(S zCHe#tpr+XR=wVO(gYknoQqPV&d)HzrN~Y}9Toi^g9G;FTGKMTt)SRnkB>7wSW#BYO zXJQNs|E1_XubiPl?)IpR>W^PF=vm9hhrXq0D}PL&ytgg;0Fj_|gzS^hd6j|CdAj@s zsA>)a74PzUuw{N^GOC+;zO53^sV99gEOi?KlzZnyV4mM>x60eg{F((tD_U2A{4x}i zOzz0F`H-<&D{?l-(`{~8mt?zxn)y+f7?l}66<_49A;dpXgy}xw8T<<=-UQ*zZd9jM 
zef#Yab8Qg#WEj!g=bI`E&U}EJBgrYJR^QH6d!^-)1ra@FOaC;yMSd*~$D|6w9e@sP zHjKpZi|r2~Q|P>qR-q6WPtaniY=B)nhHGZpPKJZ?8IG8oRtMJRF;Ho6BRyYNH}!i; zG2tC0;Mp|Az7KegbO}(CKE9CdV?wI*v?r0~3-uFdEpI z_Dg?XQgEYBIlqFyJmU?x>3uV($Eog@REBbQy$?QFJOmjYAQJof87X`EaohEAM?%rv z#5?H|y@CoKv*bQ006()l6mEUi2F>4b&)a!XWhacJd@h$<>Q{C{+CI)L#jh+lRAa4q z2iHQLw!Z%zh~|11Y%wPLld7wmpmG7(PNQJ}Hi=gFTqQ}asoUrWNwKWmNh7~?H%_5w zf`{Wu;%=kOC6aNh#S)7_MH@9tPjz;O2^;82JNkZTWvUBUAC~mX-uyK(a3W~u3zPrn zTF>;$m~TM|#}_oOD5Ree9jQv%K<3u!6XRsO=A=F%7T?)>^^2CuJ0C6uhzg5&$`3CL zAJB|uKeY{0&P0r%R2W&Y1p{$D1&|&b$V&F;W@yk8QncjscyLT$CP0QWIMwXT&f$;n zhPuwMfL=Vjod^^^7xjq&qdpi-S%wbCO1p4gK2Iw2fHzgA`CaqGivo~_a8g$BEQ^tk z9r^WQ0WFs3*OcV&i`;D5%5zT-%>OIDruRU-HZ(=Zth>@Uvqu zi5R5pJIM5aCo{1=%GA7z2pqxh7vHjTITWf47o&%=rbE*&@ya|v#J3dXQ6}|SS@wT@ zzZ^X8bNRtK$lwM^AIddWp}`#r3p%z{=>hA0Q9^Oa!<&;GS84{v^42;;y`;ylWxyi8 z$qB(l9-ViX9Wc|)T*#c84*cfKuqA#jiPYB>xC_>)%m8NG7QfWsx~i5ae&YnZz@E11 zMDq(MT}mEF-`v)*j}S-X=f@o&>Owg(Bb-9??}_`Z(@(c432k>>(OAcE)mglu7&uK& za|}CRM(qPMuLm;=^`c}y!-$6A7Eymr4crqe_T3UeY;Rre=Qn7MR$RC5L`l}8TC7ws zAH;NUM||*TWL>=Bt|S2ZBeg<3Bkn-j;hPHCzGIuOK}f?tuSI?Nb#$wuOhd7ynuc&j zNygB?KA;TN_)+IL3D_3uckR-rYx3y7jOpr0_&_Xzy5RDvaLW{@_S1Jm!=&k&kIX@_ zRmXYK8jjLTGa#q)-$s zpWsxsIo$>XHzFwvp2J_$`9yl@2t%2Isy(VLq-yO&=%qXQKX;7VPuTe>gkhek0c1;? 
zY43TW7C0deibI_m1tdR;HgKe}iF109JH`yAgQ0B7k5nt`X==_*042#87gbST1iWNj zwV*psP#lX1QfqQ;KxTw@WR{QqGqp^#XKL0qW}dmjcE(WF1=wb-zE)sa@a=1?eyLGf z3=?rfie=!M%%S&!pU5TI61aEA+JfKyWun9Ku6g9{7M0ith2>HH5ua!yBD+uhStzNu z5J6?WrXYC;8FxtR^V0ZgQbrV&&nl>`cTaz2Nd*-8yYgPAGRn+D`j@f)uoPn)g#}?1 zeo$yxvzjT4A~WVdO*aPH2d&eW4ewFak=w1L+Y3-*0;?2xq%>AD?dFp}E_G|m`g6#l z7&*}=qjPl+P)&D#puwD!%%cTKD9hA!QWb6=c2zK*j3QUjWAiW|kuaiQq>N0uB&}L& za>tgy-G>y~;AEeB33bwC)L@rMsHyX}(f_qKFqx0NrCSCz^%8zX{)`H%N95gbX7UB; zK-AU+?(p^z?k;G{?3qtRe=y0}at0h?+Fm(GlO|5#kg_sPCP+Ag8&SS=??=o5IOr>K ziKh`bHPEvD#_^!o;LNM_!Uo%UqIHw-fN{@ zVai^L<&Z-P$G&T&ZHZ!muiHQ=Ky1T${wjmyqbrWT?f>A4us@$&SBSb}BJ&Xb)g0NS z4fJBHCcO*-_i{c`wmXE0p*v1$;H^osSQK4^v zPJ>;nY^mMHXLdyZp3tqH!q703p%+p3!y>N5hEuNsbDOz?GKEo95cM&Gks-qg@jjzt z#rEMpP7xn{XC=Q7B6TxJs=!S3dXn?V0U>bZY?wGcP*W7hmn5TXyW_*yoqXWSl#Xq%lm;;JD(YeOcAvc)!h0mCv&9 zEw)H&bUbHqX##Dm2l_jb^*9dLpbcb2MpXd8%}?TBsT$3MS{++%J^t;rd7Bn|FH&{6U{Ma}b5{ za5KmR#u44a`?ECP2Fa!nlLZQQh7OC*rJTq*`XUMVJu%Z6CCYU=Q&;vv3V!P3dVwi< z21daMzcj(2O}m8OBdEF_4gMBwlzSI9K$}WRsw-qas!p#v+23AS22CmNvh|C^Q1bb` zC}JeC_+WCq3@hKam9;9%nJS>Za%qOi0(VgyzJya61 zY@60=`nf}P7(U&jH$UiPvGbK0FWK*_r=T_HwZd7+lzohr%p;uOcr$FFJ=UUT4T%dP zYK2H}sOY?Vi~PQLz`#_@hX$jmh`# z#HbHc=awx4Z{MZ?kU0a!Q)A@CLNodh06t$rgvdCn&+;Q*Vy_H`oYL_3yn9Lj4L2w! z-R_YDMHW0qFu&$)^Z2jy8l!{;q^X&{IO;$F9fw%1pHQnWb7QspxeOUcrnz|uy7M9^ zzpk}ASt1+Sz-oX<)ZmIwT1R9Xl)|uDOzA#15rN95LM`kAye#toBf|@_RPjJvDWS#j z6$_oZ)Q(mK?#b~{%^o-aAuk7*D2Sgjs1ZhC_lT9*u8}U^ zw+i2M%q$WVi2yX?Ypn`_D8|$1>0P*v_bs<%%tZ0G?wKCBf_#pR`!5sWQyPIftjnw* zaCC^2F)|9f!g30$3^0eVHk~D}aqy~7)+{ne=f>)-lv9tGmQ8&S;G*=%wI8#}zX|8@ zG+#X%)g6%(rp}dnn8A?0?f!h~f5feAKSwa*7V1Qt?2J{!=3rFB#Lcy$KM>Y;3MNZA zKiRf}A3nmE(yD&HWs#ZOH|DS3yn#E(G^6Kn?rssQykRLR&5LD?D3+z{Hm_o4Sj8?H z(7Wi+yD{-9RENT4wt!Dnz5mq!>y#+lCl3BtM5ti{1&i>VLWZf~mJp$(^?UUWSm`n? z4yg~jE8vc3srdYltn%gyq&LO1R+5t`wU;0vEI0bZoAgvb2zQRKwT_&GBkz0tmJIhs zKDLk5em`{S;{(1#wp6_tz%gfU#uGbLJo!S6vZVQ3c1$!dg>zA@t? zFp56vZmX3*?T0!R7$589gWR!;`@QKX9KWdJ+(^phr0x=I#WKK1^OnSj(=Dwyy}1!) 
zm(*?qg`RH?pdfNbmeuSB_R2mZ*!aYaKSu5`loEBU_^<7$(w@f_2&7ibr-4h7i>0Uy z%hH$8?>_T23RtTwO(rjKqS7IKKB#8|a%X!EJeF9CG<#CYgQiqAyJga9KH+JN^D#8^ zIC4=sjS3uoo*TU}2L50hfKW7X;?I?iiLvVUqFjB-ZpAtbNA(s^m|4)VhECAuk@fM! z24|Vt+7mq0pp{aP9&;edCa}$uG zss0zQp_%epr^L-5e??@vrBKut+ConO?h~xhFXX1WAIRklA{z>StoSSBD^0$aa5Cf3 zzeis6zk_+fh-WI@1IksP#rLRU9>yzJRo*B-dkLqVxyQ%@nUI1h^4EkpXeo1ij9rJ+ ze_r5o1VblijVySW#rl|8AXH zxM6Nk?i-g<&Y5OBPt;Se2ZY3&F;7<@DZ*%$yU~M{l1j6CAz5)RQJBtp%``)=p5e5}GEZ`%0wa%JqOFI4@zT=&;lQnW5uTdTije<3BO3>t)CerplQW zMUivoXW7(db(8bC<3^ZzHQ2nQ)0xjWy-supFlXy5^-8w6GJGu!D7lpwf{(6fyCE&> zPY%t?dq{q5>b^^S6r-O4N#(yLqi>ZI3QHZNX% zQ5qW2wszkqBG_Xw?+$n9u}E3BtTVjY%Fnk$nDWnuVJzf+X+p=-c9gk$DB&D0k)wEI zLwzaF`TDIsy`3KS#Nzj5e%ueLSzX|ir!4g>tpqGHc&|;>7MmKMmw7R3%H%IqfBL@V zMNs<@M4G8NIhPyA=u4C+L*TYD@EyrA?7*`m-E!ry!O%C{%xzw$&~C zJTzl~a@>JYbr*ci&?8U?m4AR)tDY6rm%&4yr&rPZJN@Fwz6?tnlvBDGOrs5+KLK70 z?bJKOpJl0n*+ri8y>oBFD-LbmR*PZtef5J8MXRvXvPjjvUqaQ8N#dc$)oY+ck6ed> z-i?QPTER>UD<8IDN1_ik`XyHkU(X`aZRz=X8UIi6y@N%FX;6A~Nb;z2>Xml8Rcu^37dG{C%$3ySOh zF>&{dBEymUm8f-5ocs_Jh?ZTc!bb?xcXV8eU<+`|*}--8M5cb$_`NGKEy?lxT;Ej0 zm4?*e4HSRaw8VfvaDL1p?ss<>=G5#uA`K8baHUiO=4t zTtdvAMU0_tF;IOHZ1I7jIIrB5C^dM`7@&+gt*Ev}H+$o9bFZ85&B|JEWumGU1}xJ% z#zxy^KM#@)KCcVh=UA?wN+|fyFtdSTO%qd$6sUnhXB&dxIr(y;sOS(>RTn5&J#@KX!Q<$%@cHpPOR}-qG*jrq&ZG!3aQSU~XBH&!n|PwsRdM*v2!; z-5DOf$;9h2&q<}Tah_e2u@+eWp2>CYG4UXdHKJ>1rv30Cv=vm$FRpmgTS|9K-y3>J zcV3D?3WbwtR?ei?IEWH^C5x47g|&5$2axGDdac@T#7ystIFb2L_ux&*P0msxA_=f1 zitjFjjEgNqn&7I|QG#MkLCXb9%;N2_CP!cf<#eP51TMIx#i8};k5dXK9C_KwnQu3R zIfN}FUZ}7v)~E16s&ZuJa?ziV>17mmOf4~@uvf;9!eKo#iI|JezQP~l~0s;fk`p0mo$=t@xuHMcO$ z$`RuAD(Qz*TCV0A@0kB#HAV7V3tBP>cIiL*i_$&T6g57%3mIO5rc+><_-cTXib8Rs zE!K04Fa6bXXv7aC5r%Y{=?sgU<6s7-782YfuFt}^4yUh4h164&zJoRs;(LXkBlPT6 zf|MPb%;t_u0)6kKwkVThIsm;#VT#`6eJBk?(mW|F^hnz*6u$K)V)TTmW|86}EM6(6 zx1iWoMJo0?zIVsiAdeu)3Fe8BJIrDCrb)WvkefSq`cvz2z@_;8?xW=$eqX6Kpapw< zzvD)X84=Edj(OTu&K`2x2YL1L2Ku0(aTQnvP%5 zhAk;Tf6Ul`&T6Q5^upZa;p{l$_&Wt1@Zfz(i*itO&K!__=lk1S{$1G_xMO;CmEg{< 
z#_eIUhq=Alu{!b^Vnb5+Fap7Hi9I_>bd}~DOLXM;MR%wJLV=KmL(wR{QGjcTlWT@7)i;Lv~ z8|#w8)nz~J66mZ|G<+Oo8+egnNaCh14EhMs=A4575? zXSuyOGKLb23)lzI0#EX-t3sF3i4n}$&8$inE8>$XNg43VOx+PG>IBEF$LL&V99-CZ zk@V#bWW>0eglf^P!dh83UQ1Th+&W@M;LqG9J-}y$8_j4*7bboP--IW1oPD@q6H~Mn z&qA(ZDjN-KFENG#)xHWQuyI9T_o|vlXpa3Zm)WBRxF{FoY{eZ1Cn9J*6yt9Q?FX?# zP9mB~9TnNb5>NN})3+6XOMDeK3>xTGjOk(JUO0B?nrcs)2rwD195BpNE+{`#44iPG zZy;ou+*w?g_aLYllgOGIC!Gjr%qpb-Ooninf>5b!Z#x~_b2Xfo+gsTBx^&a`R5Pq? zhKC{VqQ_4|f=(o1Q8HQ~qt!JjBZkLJ)F3g{Wf@d19k`$d1Gy#0f9;1UAu_ss^o4y7tl57iVDF~|0JK5NbY6uYM_@q;q2rvWaj)W&j+N~Y;Qe^fzPF1pziKn7Fe z5leRO0cydWXs&2!k}0>w)cNTv9rBiw;Jzgav{gb!|H>_=iK4^~FTOvjpX-!Dh&f6Y zU{1clc2qY`{N4;5&N+$LjcoVk!DHsum#OSbISZaSU|GAY4u4fP9G zO#nAc-@Mzl8^)p;&V^L6DV#jUVTA%0{m&fR9M@qEFL{3Y<4ZjlaffuO$((m1kD4Uu zeNy+fKMbBJQ8U*~3_m&_g!q1z=naDvIjLbv1Xy)G$e|wyQsjvX7eP7`!r~eqd$OL6U^)(O0ph%a=|so@Mdh zZbvI4o zSA@c929y+C5Quf`KwENw4voL4`9ywt&i1|}Gdl@Zo;qKto`Iri5P12M)%ZY9c(=>P zoSX)v8TBH`TO(|2<@UPt{|rw+t9Oe6T;3+pl2e*hO{L>z7QXC(buzYAW;O+lTOIEq zzZvV|Pb_-=wnY_W)UDB|CBZuOff3fTtzg+T9IvuGLDs&H zZ_>ao2E+XEW6C!o6ihU8G90u(S9~WOGQ;Z7lh1c;DV^f%%NjAF_;?+>KBR(iYMyQh3M3!-sD@e$ehlK zY^x#~X7yAc1}?lvujQGa;#OV9P3(bViu<>w1x`jZTz$B=5>b5ZVTic8p`G0>y=Sadi*Yl6Clfx$eEOagcQ(dDlTm;mB6C38py}?hene z^^$~E46B|GxIg%jJkOcYhGRB_XVA)x7gnfvk!xip$?1r@B8u8QjcPlLx}VqMz;1@n zF$~!HwW!x%kC7D?XpO&cRQ){$#89te7KF)ouro}T zy`6S{i`TS&@D7LdPAHaRxN{^bG@E6Ku3&ALiLSYK%Lvni+6qd}luKQ|GI{bd-=1HJ zl#w&TX-@+2zE+9s5mdDe`sE9q0^_% zs|x3SA~B!Ca@5hJEQqCqIZ%qsy;fLHl;D{V<}Wv7NcWUBji74cNdp*hMg>t8jiq53 zAyqa8G5-iRFX0u%J~C!n+*Dq`NNn5Vroj;}S{N=+gQx1rO7{J^-Eha1UO-3d$Um)3vQi;GWZt2~Obz2qROSn14M5i`M^Bs+ zS#V-eW&a7`|Hio_4~ItkQG&kcBe>AV;q_yJQaNKjG7u>Tk_H>hAm~*hb31LMId81N zB^z8!%u(Y|w{yVk)3Y-~K*lQzOqyDaG}M?J@fSo(fOH;u@b5m;T#U-=%n{MPsTQo8 z)i~YYv@UV;gB?KB?xn+bSa9%_Zt*hlX9`1i9Xgyzp>!o~`7-inDy%QzaM}`z$JrGK|Cx55QQg5m-pPSRs z`wdH8DGow%E&BY9U`YVKgGeh(Oec66=+H)NM5M^LhH9$pi`qL77jZ4YNrW+offDVJ zuzYI{3*>F8BZ)+k*95q4<&WZ24v|GIl#80!E=zqwp_RR$D%Nc3s`P&V1DVjLnkF8% z{s~S+TVx5b&b8f}k5H1Cm(#b31A%BS`B1o{?)3RV877mH@- 
zH3#RUta3>Hu0=$m6LnT@cx`#XKI(XTKfjL4pED@Zak=%^ECo8PApD#9f=Lg1SSUS- zx(>Xv>xg9Bi@{I@BDJ>UxO#LKz)hhL#~89;-49)6N*J+26$$%tMKs3>4fSHDS0Gdq z$^Z^dq7x)cBzrF=mORCP2_xDChFP_1U$l8sXNg_6=B>bKqf?@ceJ&FD(qzx7PvPN>J-+SZ`H85bM!|*wXI^Hf!CzqNFRxbo&Ex^ zeOLmKd=9D<&D>UJUh}@!Y)~Ag*=5L?7&49zP_eZ7=Z$8``EXZwj-1lZucXty9Q(Yj z4cNk!#QKofbUi%^}xD^5`ezHWh$P!EFlWZ%z$_u-<(ym>I;G^Gh1Qq0lyLB>M} zndXSA;OkmnJeSyU{6!I82An+@ygA~(YqCmRK!GK&!C<7jU=A};cYEKd1@6yj5t4O1J$yyCn{=-LRT!}$wqES z*NUBI^Pxb}P6%Rd+NUNp!QzraQ%t(8^sFe^geI4IR%C@8ZM?yibmNXRJ$zM00jEJ9 z5Ppl<D^Ve3U|pybE*R}>{dj~*6V%jO8cf`_!vi1E`xC-rCZw(*ClFM$&mma}+9xFfXK zn%A%x8e;YU=eK6+jtL0CCw1+NnS*1eJ)V&v-NtIKMjj&>Q8@xux0D(brCWEb#w0q_ z9+lUPL&{@eKYw^Y=vETz8dw=RCgqhxDAk-Hx3o zB#)6#&B#m-XluI!!|@5-Q*A!Y4X{(Vi*4gpp)5_Z!YPQ>7epOFR`ms+s%ncLa+n}| zlKN>$YIb81RB$5PvB99)rEHPP$1)sJdOp%EPsITgGca&gzuwPP8DtH)eo{5l)(Hnnx> zD!_c9dhwu%;qnzV1Ue>`zCt0PN)$I+pka5${?k-CYbL!cZL2F=Ae*t;D zacvT%{KYK*ce&#OfX6~THpdx~!J~B_u@7x+3W>T!>&Po%DY(G>$iyh&2Ffw{g;k~ACQr+Fo6YB@s;oZ|lgFeJFuuC`wuT%WJMn1K zJ*-T55BNs()p4?r<=K({j>c05+zYB2oTu73DVV4dR(_>Uny7k9vx7KK47hVd=$hg~ zeMY*=X#Y(|w=iKPPtRAe{HvudWA53xEzG{Hb9@jo+g)WUraP$Q4G-$t{sU+(>lrha zQr_>bNlqTC%M(}qb|@SDQoo$^sVp3|R57N_kuvbH+1-+ZUo$W7olZ}tragB!{oT^5 zAiT9fgHgr;*+jDN@_D1-ScmTQUt4FjL z@S>Y?I(D){MD_E}+lVB@J*7_K6g16VPF_B9&MWcR!68;3s1N?Qf|96{TAU6N43%8U z@xA)@du`XlAU$UHv^d`H3v>{-#SYFoRh!=hCwGP2>)R0|mo!c-)=5D*>QL@jvYHug zk}N7CAz-(l6wdbR4Qf{*FzHm%=MWG=UK6FGH;!IP;wBF+i85fn3{edOQ3gHW8du0h z&El;LHcB`x=#GR1>q5rfm2Q;tS8MFjeJoM))&h6IZ<%twi_MA3Y=d>5R;>~TZ83?IL`JJ3MUc0d>bVrx ziAoiM9Xv}}QjUat!Nn7c%G6a_HAnqx=E zFfmE%g3`0|6O)Bjy>nY_=~yyp3>)JYQ+UtmB{?y%@3im^zvDva5?dhF zdBe&+w-WtS$1HGnLa#i(re%*vbXj+gbaSxP#*;S}VK?K;mgd+AVbhqJT$-xbXWuhL8oy^hXi{GHzTNlklo_qgRe)vZU zmWA#mD37(7Y%uG}yx9Gn@~FZ2z5Z_TzHxCqm1}w2S0yu8}KDmy^5?c}S=ma&><4k;>tNBwZ#ZX!J()3;8 zP2Ea5k{rw+!p9lQ@GhvLuZ?2|e};&`qKqt;X2dD{ffCwMPLm_e+}hHidt zj}lQR1)q;s-LB8sm%o00zPHYs_W3>aopO2Ef;;B9I7k}CHyX8mA`Mup)f!gaF41y^ z)A@LUDZ!H-4>zGRZabgJ8T5AT%>2dPLP#@hy4jEDvS~ 
zP5Gg7XHDf9)4A#U{&RhM=qv@0!CJ`#C^~O&u7TFHz)$VmOA=8l18JN@YD$F{^8soT zhx?IlQPV|6w6b-bdT{_DxZwbqy1=$1Uex(c?q7#Llwab&t$m^mv?e#Qoc^d>`l6DW z2bg621U|spokGSo-H13TcV!K$x-4ZL9>8nLf_4%HQxqp^@fhfHu*)ZxXjCTIfbVNs zqAgN=1&i_M6m3c`a1Gx^JX1{XDs|~W*^0cYe31BUY3wn%TS5kVj#-+S0UR09QkZ#c zj%mU7e=1yfDq%XX;W^n5 z02hPYwlwS;=?tN|08}tk)*E&n4RL~9Xo0(Z7t#ZgNVDU8esCLWql4MnQ4dNLSx}}F ztfsWX#}XXsrq6mTyt7W3p=fm&4=x6_+<$(57^PAAXt5UC^Sjyh#_0-WT$QokG+%OT z1{W`XX`q+e%mL1^#iH1>+O^m*C^$S3MLlH$duAD@Hk6cr((4Yy>v-Lv&Mk){75c&a z*}XyP)<>8QtBMa1$SuxY^CIxSCKMrj3Glm?4Zgdo1{% z6(1%9IXo4Z;LmHRtUXdeq>MS`LM~5IaAsyEFFP%shYSJ05RnAiaL}qxSe6Vy_wMD{5v4>|!8)*Sgj)*k4P;+7|#7M1Ptic3%BKd8>GUs|x6mrs^U= z%;H(Ge#55f9j-d3bq4LsU)zzqZZh$R(8i6bUYtUHFoXK_8q=rE5^O2(um-sK<;g)> zlhq5g0M20@PsUVbSSljLbY^E}F-OSU%rr#zSIr)R&4X>HFDbr%);$yNd`#R=k$rT1 z_5~#d!h8bkdD6bZ-6KD3hxL;`3~hUcs*RKd){iWpOE5mjQkRW1WzQJL`Y(bIeN5#; zh=GXl zZ54g}DkPcAdt8mFgzG0Q#wAO_p?j%Zs^rxI!5EEDkh)ReJ4fm9PwsR;C!rBv@t22* zFTPijF2tkZeGpBjYdR+0#RPZ4yLH-*VeY&V)|yDEGBlx?P%Dk^*WRPs1m9#=JfUJP zQE6~TtRljX=(FWEqO3tT3o}9%gF~{}T$f zD$*7OPJ*?kgEG_U$Y>3TLbv&oAO0X`w?CkA#ynXLqX@V*>wx4u9jx1~ZM$nZOS0h6 z26MuFI(I2+Dy^AW_F|Z0_C6G;7;>+<1WmIj0~V@PPg6FH40& z;sQ_OKV|Nqk#dUrLW9UT?j5bIZb>edD8}*tgPkJJ+aKexVPG=G7QI+@Bq@&0G&vcR z-ou_n`_%1eDrT;tbbIoLh4z8t#DkvhGm-gz6wE4@yt5D{CzY!FeNyyhr-E%;{>q22niWaNCa?$Pw|v9xvX~IVTeT`m zefHHS!#h?k^*I4vKzK0{~Dy&Gp0j3E)ATX*| z8R&D+p5a6YZ4-Q;oeRAyk~R|A)nB^XK&54YeO&^#Xf2VB=2AUUWCY(%1^t7_zHM*$ z+(FU1iEM@S`oW55(utg}&kFk%v-YxMUOHc(3boXvlR;Y#bl~|r{hHl}p1QY{;{;8V zfT@z#;lyVBc9&7D6_d+dr%$a!(}@OJbwFn&_YrOM9@Dd}b-R1eVF8zB!BY+;;R#ioF$<=}4S4Z__ri%q^qC79H|@Pw6`_AI{G4`)U3328XabenSpx#w^Jl<39Om z&x?c_m%z#(Y7S>`ET!R-lf0t!R>gRF+*fK_YPK~HIl_=PI4QFMPZZ4f^q#|Eulcn6 zsL^A{WXTgli<(+(b43j~|6F7V5t=6o@AXY$_m9Tnq$^nYco|&WyZsz4w(~`dUe9WB z5Z8=Sr?-Z@c7j$w9PAZ-ll_v14ok}FRqkMigk}5}-tK}eCd`TxKAuAWM;Zt(6Whm2 z3(8_wds>=h2C-c4U&q)hDk1)N4PXXC|4le{Ji?Fue_f_TtAYf>?h^A}pd`DzmcGn} z^ie@P2G2X(lbpGM{>6@A2{?d&uvP7~(L)AI5NMOV*5b0cpWQOAu_LAX9S%@?N)QU^ 
zPO#N5_6h{>J&2`Uin|}kd7l0uL*OX{=dOp<2IZU13j*uGx0sz?5Xi8z)?JqD4d!xq zh)x(!T|G5UxepE2?ZFL;Px&)ASeZ|iNMes^{Z}(MnB11Ga=D3v!@#y4o3J$?@W4CQ z%io>rEr|u|m@t*|*+0QupNUKOjWf)`*eWW$@1X^|@&Jd&p`6>y_< z!r*4GjV|Sb`Sk~@cAomTUh6BzpfN3dRBf?p+V4tm9GqvZYZMyoR6g^I;MT#Xo$<#0 zVZKg8knb#{ySI+4>#08dP5c{Q;D?GW9NScr^#05<4{`QVp3DgxauLn0b~sZ8Zamms z*r*Ws=cN)=t;1tB+zj`VX_qV(svkWmaNG-7d2MgDy^5orK- zc>H)6FW-{RL)Le#^4T>YDp~I}qx8K>^ID!;9YL+_>4k3KW4PyT9gxZMt+_DXXOt0? zhtv%D-B|l3rM|dt^)M_-2lEFhOJWg&m^CN7{BK;CdU7Uv&39QUtNqeT4#xr`jSPf3 zbd5frvG5nxo2{_v#LWjs!=J%Z+7~uI+qO~rEiKh+DL-kncl{P_$$_uBve%x!C~6;x z75((bY+vyh95@T&M`r}RH#(ejXNqXN42d{Xed6S#)Rxkq<`&ej%au<0n+=>Hq<0Wq|`z zd3Uz7DN}IJ?69W|*+lGJQZc5RcrYvPfCZ&ur+(!@q_&^s1rt^0?v|(F@CT-d1pt`>x&P94t1UMm!#- zscdz)C?41*#~_HT5_2G`7^ydT5OLI8iACG!(>srNV%^m6Ihhk!P+h4%%MmE{oZP)@ zW<5{ztpYpmUTx^llMI^dC)y*wbkK3O2uPLqJ03G*<9#}THr*<7G}cz88e{rHA3 z&1+$oZ~?GFweA7hBV|SJ4RBa&z5!gu;|g9lzMmzAj6o|1@Z%QNn@>VhbOOux82Z6e z*zT^J{ukh|)qMrQeKMPXYxN<4cE_$1F;ax=Mc3M?iB0h@e`}h{P{8S2=L>XuWXgDd z{;JSjVfbI1C-}`NranFZBU!;*srwjREdLRqa|t23o$3-=L=r$T2`v1ySamyRW0dVTOmZ>NXFqQ|;wp)jo$^aYcQ}aKFdn z@yv(^$?^QV8#}mm-JC{L=|nR*AGV767rufr120+Hcf`i6|6ohsdXVYhk8dvcGp1uE z9GG3jvTy#+3Ih7n%5M?~RP`G;Rc3P;#4r63d>V+FXgpF;Rh%QzhJkjQaUoYef38h`-Y z+oZXFTIiR;tBWEk?t7S?;IX^r?g)b3zw+Wr#Ew=F$%sd%k0y1d8C*ykJG}Lo8a6UW z2{JmBXqXc13V%lTTKGcE#f1qA=j3VnRlmLVVUm8S7$phVak8>o$diJx=k$sSDM?0v zVDvN%!tXwES^0K1;vZl9lF?a#{@k9_XLAtTmJVa+P+)60_3aMXj~^*Fs?c**=^{*C z@xmDfa8z{ux(lB8X}5ol*!d@RrGYTquxmc}JpL0&obr?!JV{HADH5v2Us zvq%?$%0C5Axar|8_llTo2I>WC1<3U#x+KN4oMM;6`g8|_F{!en^=wEhl@>F;OD#g{L?f0tH;5IsB;I{+}D&g3!3aSdHDk;gHv7QsR$8m zK-j5@G$B~_$ISy(uETFAw%n&V$_7YelUt4Js&S}9OZ9H|rE6`%%J%{NtALpCE~9bB9yVDPURS^-t0>k>K$-!s}I@%rFPWZItz%yf>L zIbTs2sqsTJXai;Eg}Z{k7}K_3J>IBGKsm{0M!tbhOLjZ<(okn5mAPX!_0U{1{n7JVAKOwnPaE*9+zyJha6`@E&zm z_@@WAR^{{^dD|eJ4hW1j~oyy7d1fM z#_HV$2W5)G#fZ-ohOrgwDL$|ZJYz}eu83%f05!EaxDR+ zQ;*{0TT^?j`uS33w}E?L6lvE5$1Fft)ygo-l*; zP_$Fmv49rzEkQB|L9`dN!;26g2)n@aV0ZcY{3-jSCo~oRcRBsVaA|YNVgz-V@;8a&{*TX|cYh5>45rHH6Nvy%&mGN{P$Su} 
z%7Rr@9r>$(SRz^$OZQJaU1z3~=*7yl0|3^D?KJK&*ao9JA7*6<`O}_{9OBEFVyB1c@I5uXi~Eh3?)5Gq6-H#2 z59K`03QD)ij1MXcX%F+ovK0K2i3u72xptkD6io|f8GYovGF7QnKQ9SmYj0pk-BsGk zZ%UlrBPruTJgzi6))sF|<~zNpX~)h+SR_O7jeP zNjXw?L5n&WlrE@s?P*Alzkn-ud2pJx45a+9AzOneiYKaW;=8RF>~7g-(L0*L@O@+~ z_MZD;uUpAl`MzjY^4lo}yrsj!YS})|Kf@)}ROeE|ws1u)W6oFYm215XeN~$us_S0N zA_Xd7hHNO3yhy1N;$m)gT&f|pHFav&?v4mQhQ!F!4SS&t+n8S;+DK&UrBdyU$ob$| z^EX?}Vjo9u`-H#4km$n}u%hqxLL2B<6e*I(HH_aKY7uIGfpj(pq*(W*Dak%K$L86! zv}oI&t{e#2mz| zYks|QV08edVBm`L?Z6-n(Aru;H~E{obC(K;nKoqvWPrBpDX1ya^DO?I{(XJ-Wl!$H z62};(3kZQgfq{<3aoG9}55z=fSv1(V>~W~wo3Cr?^xl`g!a_E4vfj%!=$dpTYtebl z-Q?$39d)k$>J&9#eSg#fS5t)wbeogSXp4A)1xmsMVQ*zspHekgOJ8}Z%TD&s0hqa;4;LMFR`Q?ihd@y%#n8vT8Qr3Jpnql^JKdC zDyHfexo!oV@!R3wm+70&UUJ;@8jO1W;r1@F(O=D8WFmem#oEZ^B{wnJ0XeYW7q#bw0#BpG|5VZ2@x zpqr=fK&RDx46usd^WG?SJC{I)8PLLh70xohAS}Em5}ZB_*hHM)8k(sc4DzsrhVr%q7SbmH77QQHc%uW3C zM_~nD@t-wUgUrgN&lODp&@n{O1VFor#y5d;xg$4AoTM=gGf|@f zWLSv&ra)awHE%K6=U)-|+FEzfTAN@t^Xf}qTe=wlXV+%LH`s^W;Ll%^FV$JkLJx72 zMjp2Z1rbCB;Nn||L{*AWZgwh5t#{mviyu?vT$GRLp`=jIIj0x?YB7l73wy%rvT%0d zI-)o<{;W5;TD=O31=lXw1LRz=>KBwK>yN_f%#2^`X1I?Uuve2L7->EDxS zPj}dh@X4b!7se!BFbR6~DCW!w$V_0?w}^!y${5842)fuClwZGSPL@YwQ-nE;@ z(Gbg2XYoX}c98V-!0ba({5KJ83K7k(O21)X-*=qt>oO452c*W4TAKs$ko`fCC*A$# z?aLN9_(Q*i(XWT<^%=K^{ey~^LTS)^3SXk+HEJ_GKCSr0=w_j^cECJeLpX_j5Za<- zyjFCN;tfh){W=eKl5RlLw>AKlv%jLz!W6s?c{Gx&1s}%XJ3#=J7>Sn|#KGDEzx-Kx zuUPnGM8?3S0sBSB99sY*s1|Qb{9KlChFUOAqZl!H-x3)LA9t-S??K}zaO8FN1Y~EfJ zA+46E`H?-X!A=inEIPKAY<|e?P3~?-QHVhtNC1at2QMseMHgw>6gN)T7#n73a4m&&V*l7xp}x?bMmLimPm67~iE`{M<&OpC9s(YshpsyuNiI>S?;j zmX}p1iQjER`p}!P`|EVXH8s`vM4&6FPH2sG^XuB%w2ytdtUG7yp zpQQOvbGkM`_!W&7uiXa>4Yv!BO4!%J+#Z5RH@{lcA=7r%ZP0SH#5fIq9?B)zf7O0F z{BjxaFOrolH%rINHhUmsh?d(cK}K8*uBTP*L#jmt*%_8$ zgRhfomH2+=RwZ-xsvEJGS!-LsS66t<*!>T&X3cSi(~MIuR}Tc6Q3UyX6Q!3|o5YmR z0fv10XyyoW#ZxLc%v&_QPj_qj=!ABaL^lyQu>mCg0iQKk$lNxYSuRP9~7Khn{=c2O?wXV#jI=M z>m&z66CXlmmWnoXyHk#xC7h~W6`G%2zQRgJ z;Iy);?~`HQ)vMw5E>Mv47lc-PG|jyZCGVDc+bWhK&3Q8rL(-S}>UfT^$6@aa|Nf{1 
zr_%2YW5nlS%WF`F|9}QjO|JotX!^cjvaSTss1KN`l>0hV!)Ga?b0GtJtmcs{|BB$& z5PZd8+2RlcS5%-|CKo&sWHuo%!-NLQDuvpF6@S>Pq?a~LF4=I?R9B4VSaeOdZ5@km zA&>oexr8|0wEV688!bqdND^0;9g}6uY9#9w!y!u6@Hsxw zR4(TaQ_G}oG-X69-=?6bjt*On%Z8;Bh~|;FR&#C3_Nc(@Q8xpr-+aDU+_8`|x7^eG zkyz%K9sfNO?eQNRSQ4p+ImK9i701dWDle3D#-nM*%}wyo-Z;+LtUoeBZ|&8OSq{QwklLnt%caP-9BPv z)cY11mCC1uQC?8&Xu+lTg)2PMIPy)2b-{&)2H9C9AO-x8`}j-L(1z0W{AGf!MQHV= zm9yg)*1F-xC9D@3meTY>J4a5TY%NTB#g>^&{0r_#=LWn<>U%R1PSnME|8yyBH@v zY)a)Z9K!g{u`u6rV$9TVPyhPdei_kDaZc!(;B3vju}=1iNfh=~4G0Ay zO^??vxue8IQIJCBT^f!RRL2)rJXv3NDlTV$(;Xb_`ad^4{hid zH4xO?gx_0Gt2Y`SR!G}aPZG)@$bq25^ELo@OGZ4wOVj4-ZgzLIX+T3f%_}n@goA5k z#&l%oo(VL~;#W1O8o%5#p<&88N?6;CQ}PYY_*)ihDomAoHBPKP{t2$p*DS;>;T&EH zZOXwzcdaV?!>iXkpZ(q)+!<81RLNz7l#pwH`ETzO*!p8f74?G>%iFY=O+MrA8b#K{ z#ay<9n^JQ4`NP3r(4U`KR;K9I{v>4@kdus|)pfw=A~9xMS|juWJ(rSmO1J*IJkWq~ zxGB88PTY6v12qgrUh&6S;$>R10x6Y*)2mq!| z72AA5*xc{|Z3|805v~Z^ML-EU4Z2~`UArtU3PlAc@sxk{T_VFdO^>H4Uj>f^nr#Yy$Vnb-Ff+&fsMJ}UFA{2H=H zZR=7sZJ^VcrSS-!6CE6ri$?>ur_wvQJ{E8^b%b`Q* zPfnM-KwqIQ{zbz#)ZJ?%Vp27N!4r^jq_$?jDZH=)6?oWF4Z~v<{k&Alk$K#7yZclC z_v$xwNH6ve$f?dNjQO3O0Ip8G$hhadjUr^`4(}_xHj;FWV8jK^Z?05qvs0j4-vN*D z)No=L)T2{`z>jUSF--=kX(o#?`X6s>%IymjIYiVVYCs5}6J z?lS|ir)l76B~q=cr#yVbYhmKpbZNmuY6W#06x8dMWKn3Yr=Ju!&8M;wo8a-$fB}s2 zHfDAwK@vQ^SBbN1-0Yd>CQhHBHgr@8plpuGdBl!u4=uTBjT$PqA0_E&UmG}Q08tq6 zuB2Lk_0YTipTjIP1H1ei#aWZM{O%XfwW*M;UK&Abe=NH5N4~e`FJ@F>OMKNOfiR zgZCkQdW_z*Imuet3;Ys#-Zz~1Xbxayu+!|HpgD+kO|m5V1lM`m2<6$JjvdMJ;)u_n zet!-9gA^j#WuqIrrqJT!5QnCg#i!=D=D)TP%Vr@0)jew9;5_t_6%qmYv)P%L+cm{- zI%jjb{_k=yvC81Z6(jaFlGPIxw1uDCWJc-z7)TZU1UB#2NAUIwZ@X3cwW{wsB!<|; z4RtF7rxwdWppzpZWcIMiqNRj9Fbm();R-2L^&m(v{ajmvh_%sf$Og{LM?|xga_S&i zO2we{`O7aPuYJI8pmO)iKs^OBiTF_p!>X~cJow?C4(G3T);Tk*f=1YW7@w=6+gL&# z=-YP~NM9I&VG#jqaMQA|#%?CsbepLcBLOFNaaMoiBDNQdyC0_{c0th6ponl=F`+lU z5*955dCY}T$ft0CLgkP?$T2xeYv9q*iOzCVN92=BcJlon#JgN9P%m1)qaep<#YCXE#;BnVSgvUN@Cd1V|TRN zN2ynu_~3~JZl;~m&gd6P<$6h%S{PZdQv(nK01hk$X_1lpN|r!JY&}|m3>ap`(EHrj 
z!pvwhcmKWz&nmqHWp!Tzhe(0}5y96}*tSOGJBm!QC$E3pY*nt1DG779jiw;dLX zJ0Eti872Cup`AMZm_P5-qm3={xB{hlgKWfCGnZYDW==c>gPC{~FssOPcC}IR<1l*U z(wI6^v>lWb`USc`NW+QWYf~Gi^^g}XVm8r1B7?c|sW>6Mc0+Ei_d^;{{8~+Iy^9qn@mswG{NRR> zWgagAl;HK(j21cm3XiW8FH~&CrB?# z>8Cjf)D`VH)m%ym@1$i?K zFcO$D7h}os`Q5ac+|t_XuwAO{1^$$M@clvn8nq>qUJvmVyklNbqKLK0RXzRT0%MED zsXDg11(y~M64f=r(7#SQqUB(!TBypqO5r&Q>nyoBEsJby>wRo_a*t^XGyOS`gUq|j zOVid7-{kSWu7g+a5zT$~CGY7t=w~>OY8m3HMqpB!XG%S0_44R;#oHS1+vK#a$mB|u zZpVjg-_-IPux)Ekm7w(1F?`4J3gyZeVMxOCwaAw&Oy zM!gIJU5ghnUIUpTt!vY4U8Tp!t%_zB0!q7!x{qu#mOt}(+&;3NwVK0^CI;M0%WXwp zQu>5A98+xv=?=`rcRjR6G5ZddcSc0wl#7k4HD{D9zpwIDier2S9`e~Rc=Jy)`A{YJ zE5pT_JtUCFFXjU9DV)I;G>4VU+!s22r3%A(W=*w=##haf(&;pnsyt#WT~kI^dWpslpiy4=p3GA-#+5B+w>lDuv}ZVD?hQ9*-qAQ z&H-2Y#C#V`j9i;=g+V(uPNfPCj`Rc{SI&#)d@64=#pmB(k5f5g~?CQmS7lcdEY5yFKzg z7E%yT17wreQ{l! zsG6xGAAo{ZIPscPIoPFGC(#giJa3T@X|}s7{O+N8yv}Li7s{?aXn~?TO$}ZUzXNZ; z6GyOmvKZU?{4r5o`(OB$xc6IDwvD;Ixl5kkr-r)7{p%d4 zRvTO|PJL+POBCzJqO|@GF!q%LWpirNMv*9m+!6zs2(EAqLkJ7;WLfHOS98i8oDj30 zWL*};W+cn;)57@JZy;q{P5*$3%R*Hy4Gn5FCJk>~Q-LZYi$N$M-bACm&@Hnb%;hbx z@6bmn)&$8C#wsX1G)LDaNoKp#Mx(d?pH|!UiUedu z*C76$^7iz8!;Ogw{d#Q$>UB+1gGmCMn-}tyUO&UG_+mjaO-0byQTuDXu&Mh5GxMw< zVkdxaoaO-y{1oIY+Mm_hK0o$P*C=E=qX&kq8g!m_Ji&*#O=NS1yQ*WROzVB6E%43V zvd)%CnKpH)HzID@S?*mr>xU_|RI?Jz{9dJqiXLR{nz*gRu7>upS$a;2FGA5HL&K&s z`Q|9pfS7baL+rIgWn?qTO}=DZH|a-d4rdvJ$#E(kM4m$_^87gaA+!N@PIJcW>=D^y zArd1m47cF$NoW=c88m3ZUV!bztu}{N8T7wQ=?%|Srdv;oGga4QJ3hZQuj`+YPfqcc0Ukv5^sI%X~*)`h89#& z6UAT!k-!#su4aZAkN`er=>c`}HWfmBDweRjljp_g?Cwr}A;#vmg_1uV6V&C8^)hfG ztMl;s{y6jyd|kiks$gtY@flbh*TN-%-R_Nil@u;mM}fAXmgC57QLnBJOu^{Mviuu} z(@h2nO!+$P+5}8LYmIy+7#l{Y>BKYFYO&m6sS>x=O!l<_lkptL|r2s{Pr86tceRwxw44C#u$}PB$_x9D2{G3oSd>eSH8Vy zOzNYG3L>rL%}Or55w0Uw2joCcG3xfGd>M_rJi?g(_Sm+^7dNo0g;zlI{&W(gLZDHn z%C@a_|7FZRHNm$@BioIa7mVHBv#mGdUhZW2yJ0AIide=wcc`VahG&SvDEE@OPRE+$*F z=L1*{K8A;|ItdaH%TRc>qd5jQ(wcas|OO!HYBS9@@0 zmlV2ow1t<$+%YYLI_iV)TwA-NZb(et&70rz9;^L#w5qk?cPbP#gnZ-J5DRVY$PD%4 
z1!^J|!(w9eBFh>qN+!6LVjB+g8eaK0UnBCTr&y-xN`su5=kc!mIN`(T!IbvE%#Qfu zUQg5jA_eMUBeAea3hXl*$>c=qW9|M)Dgf3>L>eWe=lBHU=v|W4(V!^3r{5G|yB!Uw z_kzQ^aAbz~)@D39QlvreQf-U#a&uFrg5n5s_ORqB$(r+;2*atlZ}5@fAQwxDqsVAW zRR8OsDDF9a&vOw2nrC>6Hu#Nag%q7UhPbhKp+&c*beW**+*DMZ{6jVM(iYZ=xGrob zY>&gk`7E%zD&{7*-uYR(CX{$?QK<{{B1!VxfI)v zuae)z);NHh27!ZzgNut(DGyV}wrynQ-D{1OXnZ;8rWtH_D_snZm#EJyBz)l_O8lQN zz5gMu>R%@B!?!SNi2n+4cDDaVafkn}xZVGd^{psHfAW8+sCD%q@?It~K_#Fb;VaKS zfTO#xo+LT`4=Ur}5BxYS2u?bZp4K01OkJxCJ=9!vznBnZ9eXPWiYi;#85kM@=lm(U z+1Wz=KtVYVrWRpgnjfFAfOo-i=^zzPPzdtSvy`x7hMjRQ^=)M(<>U{QrV);->nKAkj!gH;2Fb>yB@PIK z2YKu-2-V+u{Anv2%48okg=VMhG4K!6?eI4GPl)FKu=cU}?{KbfYxDhAY$ZE@$^X&X zr~jX|Q}vUyFTV+mqaYCPCJ7;&^_rj5oh%gVe4>1rd;Y_R*a(U@t6gt!|I_wpt`sko z`kt)Tf6avd&)eht|2N?bt%S7y&7=de0{(!4S#j@9MtlkN`Ux4|!Iy#zbhMd{{7({i z-B9wxzUi~O-$U^Kmfh%V`QJ_czd+xbD$Pg_FcVxyXrFNYw1H*%c}@bCh@1 z)OcFN2Q4Pp5GUk>vP_WS%4e%ba640;vj%ONnsOsEGStxLOI0>+99&jQw8d4oYk&oc z1->lqF>mmPL6s;eIceRKnC65@S%M610Hc34qDD~TQy>$dms1FF`Y;Xg=F)q-Av>up zggy@>#Y7qCYCB_>nv^EpA5qo@%toTrl0@&5Hkwk_f&mK#(OMQTmo_G<$^}4QU*=~c zD%EH=bZSHOr!AvDU@7bg=YF0FiW%zxJCbuWoNnV4e_LhmQ%}a7>ok(Qp2I&^*8Y1I z9&?G}6@V{w3O>{XXQZwmc-WGe={kLm)#D@*h_WH<;S}`ypS~FCKyghIArO#+Z~CUp z|K;lG%eoqy+L>}N>oYT&*c<=%mHb~oIIp_=bdpZ8INMjAeX~7qLJhsmz8?f5hB#r4 z1S6m$sIdFt?Zc!Du2dd4nh-)TF^A$|5iW>$E03Nl$a|)|UqK-?dCGl#pDJ-8GJ=BGA8+TfEdjyyUD(mHbP6C(V6aZ@=cMKj#O^;}6 zfIgDs88VG@^V_ii`K0BmfPyT9EwfUK>cMC$H-q!CHhmfEouT+0XussdOkt zI3S8K%28}Y7EsqzJZI(Lq=T2Uyuy%{yT^zo=>is|i%elo+NqgoK-glR!d0%VX*l5J z$v5^}DJkr7uFt}HaB-Xf=5cj~#m+J;2 zV3cOQGfzAdjLJPuv9Z^x6-Dk&3-)xlm*vl`%F(hj_De09#y;01#v_cG}(nP9aYda#xT7oQ8b^DN) zWmO4T&Bt7_I0WteuaP`~T5Y2f8RKvPuK zjAVjYw)w8;6EK;)tZF=t@zLICL92LDD7Wy`wMUQlsJn;!DzQsI9=)Wr*WF7LcZb0j zN!~hsw*a(Ra5#Mqj=e;@mf*U(dt9bG0Br{`qn<}xAl*EKy^Gz&yMKvlrvxCT@TD1? 
zyEw-m2llVYby06ZZ!lV=_aa(J#4l;Nz=$u9u*R_J&hezFRT5G_lfD5GD|a9_e&`Pq z)N@16hCQGK9siJqXG`D6FK4<4=ppOKV(gFK4(O_M&~n6WTUadJ9sq9XsxI!PD@f0umLh#MSg8ztE?jJW6c~$#1q5HyxqEq;j zIJH8(fNdkhBLbfr{EeDd$^%r(YoPoztlnaL%L9ltIy zt`n<=iZ8z7m+r%y=Muds)-x8D!pEqm3}L(7l4=P~$nA>jqL>RV(YkpV+%%HVQX1z- z%dYV6>GO&Jw4u;9tK$3Bs$`@ZADAi=s~c+gnFE}|dhOj3=Tzzq8m<990w0q}*}nz+ z9V^GoEFI)M+&!&?cZ_>j@(so=y5l>d5)h?bRizpJ6UEN7-^>|Gl_iP9!Dk~{e34pk z0s9nBF2Oy_@D*;O@F}R+q$~;|p(_%>QuYwTyk>Wt5)0KdtQlc_<`A+jx^Fk9D!6QR z(PtdopDBdbo%EUGFnIVxQ8saF}H{m)S&R8~}%+^F3N(Z!>69 z%8Fj9%I2u4q#JAt6sJ9I0WqsEVX=6k)XM0CrN9>@TSp_}laWvAks{j?&lvi3yBe;= z(2=XqTw8WQ(l(@Q6TdyS-MRILPI0b1!B*iz_=q7~h&Pf_*-o_Hd{Xl2!Uj+IWk6rv zX0{IkXt8`MUAo%}3}u-uwI ziWHzjuQnAiKwp{}Qm{^s7_zaFl|Swr-nBa6wiIX7gZzl9b5P2H^j8FJSkeWTbw=DARIO+G$N5W&HwGS!PD|CEHP*|kq&_)|vG!W3oP0+g3W@_L8F z<;f_|1e~a;`EKjP%i`btWbMk$=*2i2Zdj=*XJF*;iqD(OYSJMmM@Ux(x@(Pvg|>nd3|!pi1U6 zEjQHa2creVZ5L7QaPp*hS8TBX#gfzO1JLh|hOxV)%whq)eTPzfq2)mNJIr>``QN8c z@m`aST( zw;Xw8np##_$T1w&ulx1wysX|`#AyWd}%56~_2RpOQgOx6+cvb+hZuPGbyRG0F z#DM}l<}hU&pLs4>CB(Of&g$F#)s@wXThW`+8TI)8ij6kJ2n?*G4xZFY5zC2)3IWq+ zPs-5T9*$22$dA&A1W$mvPCCVqD1bR5-=KA_XDBYy9X%fVILkE4tN*R4_$*_s5;wAF zbuC6Z{(Mw^-rO~FMZ>VBI&@D}TWi>WJ6B9Cu1lTz9{sAEfM-{TZE3u+2vE0{F6hbA zyf&!Zjodw=Y?w!w`3n*tyDRf|J_>JlMy87=p1bXxGXosugo zZ;;fC6yY6@^S(>aNuMRXB%w&v{6%dO4qWk`G9Q0CCA{-H#j;b_(YgdlldxNI>VxCr zG;>dp6>)M)-KR@TD%i-+At|#_r7ZZwpah~Q7)syj%%}}^X)(Npv;@Y8BCIyy6QEG} z1^rIb5t%X*$xoNmq=G;v?4xKBC?>sArrb*lro6!l6DCzDDw=APAK-+zO*KjB)pAe~c_r%OY6*0xE+ya*j zM5n3*K-3WzOOC=g-w5#$gczZnf>&*Zn}K6-@tT@9{A;nbLkh>VqN}=9-X{s;hUHGfmaaQukc081gk zAfd%an{Mf-tzp7aT6HCpotO_;j$-#Eg1xj^LqN`C??Pt$+L-wWB6YIa320;EyqegY zZlT^@JQFfvy^P2cZaY*HsqRvrhHA2_aEbc+2gOv^N8z|)Q{5TF52+QgDdBs=QWarANBl!8)fz5t)Jedm0{pMle=O5Xkl{3X}e zV>$ngwR4EBtP9umpGs1(Dz*qqRc>Mwo~gK*UErPTS(=^EcSRJK#o00#1sHEM(vosQOJNzqNb!j!B+X! 
zh<(iD`gduF^;}Y=qNI$WQnOy0rG3cDCNse5$TmHfv{H7mZInG|bKgNB&n(_~B7`n_ zrHy~cs2(k7kw<`7t^tpInkeB2@9<2@HTG}cW+_{GOzz=NYjby_^RRvCxEX5<9_W<5 zl1w2zt77dl7O7w!`2d%dy`%Oc9K7Lvvl0u9BTQ^qX@!Ic6ZK=V#j|oSO$47sl9+q1 zMpl63wj4(vu2VN6T6Arc2ym zI{Dz?GKJtaL*0!gQ72wEsAt9z`AOH4>8$Hh!POhN1@vDi22+Kf(}JI+kDqbq%2RU6 zVhQf-jFML2tmnr>u`yCfWWYC%evfM)9KH-ZEyQvDdulR-VbY!G^UHX4Ctm4)*oD0wZ5ZZ7NcVuL*zSP$S{O*Vb}0}Y(*V6KOZgJ!nqIzP z37d>p*64`UMB(16*B)CxP^7~exK2FB0p{upf}WaX!xuFghi1&TV_2AGLB(Vy&9tJH zi5!SF;+AYmBF(%;h1ThkGs5Il(FhsBO?fO!JP+iT@6G5+H30TBi_Bk zXew+<;`+%p&{hI4&pLOTHE1H=w2GVwdv1v6+IM||;A4I$9o4Ge7zd|Fo-rl_TDgw> zIuANx!vB-KRp6WniCbuMK-a9`7|e@{%?DXxPk8bzE`dai(4iGMozC1}tmG3IT0oPq zATMH>T$y89U<~Da6;R!sn^J*;e*f%fA1yac3M#kH*fj53(Qbm(fN+!EVc-W;$}AuH7tmV= zndz(vaE&?kxlWo~!i0Zlin+27t?)igsmhcutoWK2sJg~g<*hLmxXa(iDmb=^8WZ?z zg$$m2J#Y>qne@!PN(bHW5xlgfA$}nEwp}xsgOwL#x+9Q^T;^U3I(f$Bwd4@_Ne{bO z!WV7bIz|{%?F4t4Z9xGA84pGb5iq+Co@!yfbBGv6%GRQyI<&JCb z&Mf8xmM|BT)%TfNIGRBYX3l4dj8ackvT44~?oUc>>Kn}y25Iqx$7xuhDklQXwbXHH zkjAGj3I*f|DOSOzfw9i<0)6CyIg;;?Een4IApe5B#OQQBur}Y*ekNwD2kzrZ zz({Z?SYd>qDRFlg|@mkG{>Rp)<4$lt$+KaSj zq-d2>S4AF_G>NNr2_@3Dxm>IEhz*$X8%50%Oyb>&Df|V#yI?Rg5Vs@nG4({vXVtld1=Gb>rCT1+$Ji^l@Tia+W<|vH-nhA5Whv-$FGucISO>&HHt^i3VX-sE`vkj9L5hJ(fshXUsE9+S7 z+e@(zxBA(6nOBGy!WCGTaSTeY_d zGvCJ}--q-~JOxpmYe$er8~FVU{o54!;F-~`h0RiqM6{^PH)!vmIl2Z(UNVX&<@NU{ zCrFiRoVmIzHU_pKwb1I=Wlb)In#o80ngNTIT}v;H#k!s+@5$^TVcfEDx#FSy`~$DS zJ2RL|*Xd|~1#P2}ex+0nO~L4${O(O`2fwZ^l;IA2gviRx`Nz$o>cs+V6vhEynlrBB0fLB_z_ z9+np3A@HQk)D@UGjMky0vE>4H3x*+I*bFs(zK&@NQ^N3R={8}!~s^%VrZk1a`73IqOE0`|Aq?4v}Ckw=!A;Vi!cg2&QmFS841LJk6wd%GN{jO zo*ePUL|!GG` zI_V^hl;m8u#&BtFiK?7i@0L81a8+)%=%)O{)oSBG z8Ml?B*VmLy$K?jdF@f#nENh$_Rg6WpkcWKxfurt7MgA z*MM8YCHQSLk%Y8+mWYsl+j5Q>b{<}2UEgdtE8Wy+)T0+LoP8~pSaa62dWoxoRdhoP z=QogdT;-YQ@jpkSsrMTa#_dQW$Ia-AHiuSUM3gMK4ntY20Fq?;VDC z6cWkPiB>M^Sa*?2UdUD_@nV%qdPiZ7sT2Ozc*L#nW=Iy&lPjqU5{Ay!QTj{Nr96Xn z#btHsdd7AIAtoW? 
zWErAkP(VyC75gQ6b^hEe!T^^>&{e18C>P@Ue|+_{Vn_RLvOB8XU6n#~$dU9IBI1tf z;!+d(({gN&(dWsiE$1L*k%C8%Yr`*OOd&nj#GLBw#TCDFW^4)MRqPTD zuIdHhMOCxxh%{7C>4G#OxkdLNmi9PJ=h~Y*SLN_8;5)hxUliKF9a@%+DRj?8X}Pu7 z38!2gIfFU?LBrdp>Mx)h&E>Pkkqn@@Y(lp^j6w*7Bo)VOnTVa}>XLPJG&V-3=AHWj}dCesdE*21>TSQmA0&qJ1 zmxMH`NT&ybMZRD}3Pizjo57R$vQDt?D_iIm?X~WAIUQtc6 zQjl9>Z&ZFMNzA&0{?VsJkmKl<1%ROC;qp0fN98&@Y3lwIV7&4s7$xvM7$=R3noh3= zhb~uhEpT{r`Z2oE=O$}tOk{%d%3gh0PzZau_r9-~%sqfo4Z$>j|K;|)f5Fp)@=C_*(K&ucY@D#RP`#XtFa8r7n7wD zr%F@>#^>l|8}i*CV()}fn_?$EI;AC==LF#jR8uGF=28=G;0c5y^D@?O6ykSW@mnl8 z(Os34gtD)3&SLw{SQ`7xW?Yb*%=+4?O$A_eDij-aBvDJaaP1CuFD&=-<(-bwLz#yv z&v%gt2b--sm37*pcz)yHQ{Hw$G&DQ&!)uwO9kU;Ag@*9R|B@R**y0jLh4z7C8@P}J zY<5S28&%cNImgYjcmK?iGZ~le=%uVv^@y|WQ7B3~8CzMEd#aGd6IxJGR39Ck(*81pA< zOLxIG^>`lQ%Nw2gSsJvI)?FVhbQ`Z$Fy7!FV_;b}TSPjpA7$G3G9M2jUj2`|@3G`x zEcRRO96#OBY~Mdlq;gyZ+V3@~j_-@sJ5K~?VXb&uu1?`8{PgRt3k~%!8`RC^lAAW- zo9^yR*Kl^>Cc>Aos)U}!BbGCeM-L-MBTYl`B%w7?`zN$tdAkFHSh&SU*4?i)>kq7_ zQ~L(9M7mRFGj^_J@#(g2$A&k9x;EUhH(T#p`S)((-Q4Wn)u}+dL)ZTD`}RMug!&>A z{KI?ZKjMbA{GSQ;d?J6?|9z(U@r`)Hi{_El8K7}|;Z-%!KjFy=r1{lVuTh~x>m>*} zx9Q@8NW0_d$V9$33rMz~#MsW_Tr*@J)9$KVu?mLJtG`^btlIvAXQGqWbY6K;89Nc) z9F&ShJCkmJNGA;%cdegdII(o4t=dx^4g$ZZy_=l)_}P40&WEPorLUilfaX!fqcu~r z_RGV!a_?%0;=cFR6&UZ*iEOhCVcV5tFUMHZbP*)RBVF0^+Y@PG9icqF`STg|} z$Wv3f8>%~*J(!S*yfZ5?2!i0zN8xCZ{qM(|#lRM$%aEfKqE|K>TU|(rjC@5!dRJ+klQAZ)s<;pUR!zh%&-P+j)I z%fm9`f4uY;;bn=J7L>0~B!SY_K3PCd5X$&d-ywR#cJ5BUcL=>KNTYfTdF6d4=rbJe zX#c3EpO&0DG}oztI%s>uOyXX?)ZGwd9nr4=enGM=(taSP+VjF+xx%Yq|B$w6uj07* zG~f~X`9o=c$3$pcjLs_fnN(GwPYDm-86Tu%Z~VDVK)2A{%Dw&yMb_yC^@={cipvGD zN0ZI?uE8}GJF4UQ_jRr-Ovb~>C*C<=s52O$+PDp>0`A%9Djs;BoG4z#sl_1FcBtXt z)2r{|{fBLcx^Se~WQ-=uEb9|R!ZPbcL&n{xgy;iI$NiUB6W@swL_{g!%;H6d(fnCF zg;Z@dHSN`iRrA|xD)dpjooYq*KlQ|yy%DsvfiLFJeZgik*-*Vy?)gY-GNCWO1QU}!e323-UqE~sSFEO=-SI@hX7hF#>9qD{zPR9A!*pwX00qphId2okr#4Y*qXw6wB$UU*g@x~H`T^8UV z?E@Ty=c!ks4Xb32*l+ulke|2A(yV?m=z5D#&oZ=VYA`hgn(r@S>(M;}@$OoCWVSEN zPyKCnj{Tj09TVJS+I@Kti#@{k`_Uj^sr~y8pZ_jJ%kAlh=mJx6gc7dj?Ituy3CQKG 
z9pPqI;=y^fyUw(sztr3IJzqk3T-*W~Zik|&uhaH5f)nLt9p)cep-v3OejxK*&qkj+ z;-=RRbAzRtNWRD&U5=y`6UE8&L*SNy1|H4#cS^0}fAzMmR)+z-GIiD)dpuD$CTj{~ zcZ8z~=fNku79*iA?8YmHzCTCGF`K*18Y*SG|7aA(I{pBESPczx7^Tk&%g(Rt=X~*T zCt}iob>{H_D5@eiU zpKNV6B40c-^`p8)wozvvvoHDv1XV%a5MOC%cAJTTv{UAJzv=n@4%W9+`Qi&2%5*dL zd@ca%*u}$6XJ$%C? z?DD|-KLT8F3oyFs0Vb8I7OM|Gv)>?&V3!A(B$XiTiA=ZsTaj)f%t1YQb6N#(-qJHZ zxI07puWL4eE1iFRgJd2cH?8+cFs}OIXYD0*yP0&%SJQLrDZA3Sg2c|)4RU`!LV8+Y zD;PX@zXnI0LK5y1(cjldWB=aFnDs%jKHeGl2J@2g-p;=v-so8774-XJs=sUK{yJT` z=VHn1J-GJ{*mOV{JyN-)<1RXd1Uglsp;M}dOPrd=sed5;Rf<-iT+i_afV}Y^NrYM) zpwzDlQ|47K)D#kPhGIP$NqaExbBxpa6>ft_Z?Q|@lZGyizq9q*Yp=mlv@djZUHZ8=l<9?qetOxr3(IZpenljMH7WFJ2T@ zb99q>9!VqGp^}y~C&I@C2kv?4HqEexYj(5yKbGvB6^;D{5BxP?qm~cwa05+DeN89y zD*Fryn7xpaVQ{}7x~v(G7-dVnk8eQswPa-G$+j;HknJxatopeAt+hzed{O_%$#EPp ze*SBBkoPXXqf1CKL%KBWS*vw%X^WxDYxq;JN_>3d&M6E*9V0K{gF#`;tKfe95qE6n zvn_-IJuLjMT!XI_Ep#$edR1@OhmH0Fg?_IR9!N4=TAG=Knc|1dSX~hbTGp4vne-w0^hF{tZ3$EWPS z6%KX20|~q=^&h=npgbi}3Fikx*+9f{-v5)=Z_;!u5JJH;nK(IB)UU2$AVWZdu$bv~ zbIAhgFAfik;++3ZLS)n#4C*|rJ827gK~Z6HFjJ0<8lmu^*hTEk(4bsoplN!`ak+8c_D zlJYlU7|oYxt%b@NDEChhoGi@UbHD3^9l;emTFudjhMjvf?B#I5dd%2pUyJTd$oxpsr9Clkz0qU`(b*eO&Tl{;n5ln24n z-ipA4UAWZ-nO3qYiP;y6f^6|{#;MGXRg6dIC7kX|qIH*%aZ8+ZV=D! zeq;>yD~u`l;33($HJ!b}$Doc)=paIoAv$TX$N!6&IU_v23ODkE+*uu)!X^zRG9_zp zJ*hp>{CLQMJK|j5>;bZAY81_)^*aBU%u#wrlTZ0f~$sbn!(EG1St)FvD z)mfubQUyvvg^Prb)Z9J@T|%cOwPuEe3b`sg8hLRzY_(9*OqFap64iu`qRM!?GhXyc z88|kqRwvC@9d}o7;3D0mL|AMJ=kiO8HH%oj$V`cLDc>@SNl>1!X<4`;T3pCdYP;T> z7^27!MRm^j$Aaj;^&@3n#d#;oMWvFAwZnl-HPV0Zs?brYBGrnLs7N*9og+_$Z`R~b^a*uPGs1t&95>f zE9C_$Yw?W`%^v#kuD+%LiD;Nr5elZoODJycLCEO#rntr=566&FKc+Nj$&)Cya%JSS z%Q;Oij8jU^spe16&?oCvaV6jzEwXCo^BU>H>MIpf$1z|~m}N1`nxyuf`bLw}T|;c- zbWO=F-I`sP;s1uq3QqEx5xV2PWo|%hQXUr-*CjA9RsP z&;pWT;M<$dTUV*{8aa%xS>QvcgsQSirdDWurduiH;M8aU%7s??-i2=Zh3r-^##m(! 
z#G_5I)&={j5lqa&#miKsC)n9dIwU4bPo{x(Nr&O)ItvMrfjNstmmc^Oc8a zf=ibgbrEd)_n}LKgJ^~;^k8YLTExnv?3G37QW>qNCT&KI!=oZKDs!uSl8luWlL1I5c@&Th6es>sm%8JmXGr2n zOeYs;Wb5WCzHcO_|0DRMHy`ChPF<0i^Zqw@-OqV;$s;U(o@KMo%a0YvH!uo6$Kq+XeT8_E=gR^_>%EJJGOij! zcY#fCGwD3AUe9ok&Fx}7WcThbe8K#bpX;i-*_+X{q?4A7$6#a@%nd6Y-8Dh6KR_el z>{3t!Np{sAzq*gIk`}VrYrdURdc5&U+K?taY$93=mRDwi&C`!=LK#Jtc*Y&u%WAqK zDP$Gr1W-qVWMpvwxY^+e(%+IBN87a~D&yqR9}s!tMjQkf&|<-kjC?((NH&poY_28+ zD|t(&?5 z4&T;;&pGLWU2Khu|IgBMd}krJ^m^^uD+fn8LQkRolNX(&YB!AfYqdud#+kwREcF%f zK>LVb;8gaF7JU<04d=Gda*yGFb%lv9qv?4$dxqa6i$+H)o^9ac*nEFZKUvX5w){hEXoI3k)lp}S@HWS1ZV0Ucvflzqzow>@9F{5X)_HiUkvVAmoi;OM@#-7M& zf;MM?H%!y~V3yHtyD^2M8);&t>M?U!ABw{8@5IzFO)e5ADeB2(TJ3@W}{%Rye@q&4B}PIXRP)Km0@%V_qNk0lb= zex%pN1)+Qf&N7X`J)0!j`UVd4cIfJThc~AlRXpTV@jpVYQy+(N^(|z7@=dV$3KJT! zua>(!r$-a^*?UVrQmFdRwqIVsOt9_*zG$mPjmKM8T=jz6_L@6qG~B}M(QQh3tjH_K2ZqC$uTV-mw|-wZ_bsNE>ci++L$_x z`)7a(jFBT;%w(!{B`qb}AxJWVP$hfJY~ZGvX|-ZE-m|%A+q?+^q`(ZCRtuHQjGM7* z>p1TB-(avC1demDHYjT1B*$Oa2jgAoiBvlg zz>QRJWbWY!QcvRKwDigINyv?f@FBj|a6oIdg11bTnC^XoLCUqn!N#5uYfu3Ft6>!m zZIjGHMHwuxe}+>yLMdo*c0rmHKFHyTZ?$j}e<{?KkZcdprwRo8^L2<-F6ok65~w@u zIiuZ(7~te|T3S;=#h7CX8kR3zk3Qcrr=gt*6M9D`M0PStXnlvd!qM@Et>Kh;TLA|F zWa~Kx^+X2aOf|EePxddEmE&B~GM_7zyY!{b1Y2YFcz{K5TAx@pbb9Y?3arWUDnIs^ zoxV${sVzooc_I)~wvT#Pd*J<{0<^24+AFEj3>7JdyHPOzr$}LxP{SLxh)oZd5dI6t zC)hfriFw>SEb#NdU)BDvM#BS9wNb*j-G{>AxN_=R}%n+eOTVaKXQh#@HO9cyr9sanLTsXQSl%f#NXZpSS?_W%jz96 zX>M^0?XYf{-6!`9AO#)7IVJhW8a@}d859f_H$ zv$%oe*1_!@`Y((E_sn=@fG*7zq_jwlvW#rqH0!8w*F)ZXHm+Xwb0g5S`Ww*iylH4(L$~r0jh)pOjW(qF207 z&u}3@WujEmOxjg%n0n~9|Eq(7gYjI%Vk-6pzFSR#DxXmB==V5`1gvZ<>9c{wQUC5l z_}M_>x#|a&zu$f;*6FTvaQ=zl8}6GR?+GIKHSh-WSzaM;{3kGE9DuCg)a6#c^33lFF<7x!FPdFXo{WgB^YH*Yk z1O1ytV)1O~9fjY31|zit>XZ+3Px|kZ}ZKN+#XK<_NU(M zI%yQP% z7ZH~qk~F@Qh;8C`xQGeNU5x@i>KDABw7!Um6P>l!>h({RB@P zN^$P-=hL%(z?BPaGfm22hYbH3#U&2lDhRb9J z$=`46#nwh6%(4mIIiF~kyYAQ{YQJjsz|s*$9R6#t9 zf%D~>|Mdz;uj%EANmj>GSxj-EHG9?b)~Jm{G?s<8n;|IX0I^&FmW*&46CRSv-l#Vb 
zJ5*A5KQJ``)s2zU^%wmxzN%_dV5+G|4&k_&dEB%Q6{*g`J;UjnlvGEGtH&P#SNVwo zMxvEXt++id>^H?;mtalx+!=St_mi{o?OH*fW3jg-NPi2!7Xo-c13c8>Goo(*c~X2t z#m_PGPa7_Ygyffm-QP0rw{Xtad*u-qE)bSow;@@5P_5pbMXU6e6oCacVJT+!Xv%nZ zas(AUnm&YmnG5c`S9>JdJ1k-Rm%lEuSZ}-ghF@Y(+^z*84k~keEZ4iB?$A%?CvA;a zV>(sCBw5=n3_c@aq@9q}9?htyYdkCFGxq$*W) z4KC>If^oY^$@BJF{s;8Z^}5(~G|W0o102CnA5V{D;vA9H+q51H^Qt!&SW=xWs_AS5AQTuMm6N?vhu66K7W?d9llGhl5&sS> zKt@vXUpDCqnm2hPah|r8u1mC7BB5G~)rocgK60KObJu(`O4i0~m0AHx1=0UCr@rjN zb0g1a5bmZ%%#X%5fwG9Q1I=EWeVdycIHun-bcYyuR4+gyJxB$~MS_c!b^kcfdy6yy zL7L_iHn^lY@hG~3uXRNb)K%`0WxBmbb2IKWl(s^e+#)yN2h=2l9x@=g4p-(j8o+jd zBf)$qo@jA~K$@(=z3>G%%lEpka)q~Z7wixZLC4zs4Zq1Lkk9K1S45)gJO1WqcoZYjMQGTvamL5i!Pejn`cp+{@Z&&R2lfIn_to z!kbrQm%=_!wDOFzpJB;Cfq$Qi(_t|W>fpSiz1Xl!<`{@caK$WJ+UH8CIM~Nh%NDW{ zDso9KSQFapO{)b~B}w#XWf{lADuk!4yj*9JH{l0Zg;55HJW;9W_0yN_GZY;*IMdw_ z5K>ihm@WgI2%(c8F@r=d8N7p^%W}bwc~u3$FMy5_P93!wEc|##4yk$)FGUj9v)sf5 zcNBaDE?H8~WG1npl4<=XQLPKN{A$CjbZ;>5Vv=dh7 z-4AM?268GRt5N$9=vbTYkZULZRN83bt)0?#pP|D|R!g8a91xw1SWo)LYS5o`xhmGdAp7K)MFTk*H5aHbS=(r`bZUGOwd{(j=sV0oPgC|$a<=yg4UO?~2 z8RAQS?rA0G>C^fwC-(-+J&H}`(!gL_`ix^;`hlk;%-lADr)oax?ca)mz4cFV_Y6_z z9I&|R%UsTKC%J^eP(GF-z!-P?<|YA~aZefM6JB7PpxNfs28%mk2LrmDU|?<5;Z<$M zq2GoWfvLpc1L8TRm*^C;MSpQ@%ggD780V+K2LTeuU+HLmRbh^ZaInOqDH^tTBhw3S zPtyU<*(|DMkQ-B6pKn;p2|;Pqzb-oVF)*GbTQXcb}l|DgkXsIP9y?=4H&^2iDWCTidXFIQV zO{i}~onf#c0>u7~SI_UO+?BrmpPnnABCl-*UG*2$=>(gIl4l4@2Ia?GGV}BprPTul zKAWX3$tl6XpMx7V(;L+W&>JO~7z)Vdc3MzzU4&Z^h?-XY1gk_+5fo&uQZFCy@_wr6A%t2U}uFhAEP3*+Hl1} zty3SE8L(5h-@hy5Qg3%m4)almLKWd93cH zxi7XzzlpR_>{{?4v(mWlyAFA>t08Xj9HcKMNJ-y8byd$L2{puv)eCQD?DYbf&H`om zE|6OPb4~7ptn3EK?{R74^z;z-8}2vB zl2k-*;*V0~MTaWJ9}EKz{XvueyGWRP@cX};52>7O-p-wiu4kiIuh;W~q}Wd7nke7H zx*ngGB@JI2m-kb-$9%@01TihHhtGZw89lz*-c*bM-+t5@+voBEpTj!eA2NPlu_cV; zIzBgEh9A-PpNBbXd-l6Gz8|q)SKvz(J)a7Xf-aRk?-ezl`=J6_QhtYzgkSSL3UGuA zwFX~pcN;se7vEpaC$H-xZZ&0m$;%~vd+xqR=7O)+$2Wc!x7Rt1+aL0WFVm0BUw2S~ zpx_*h?%M@F&|1x2jhXLxmwPwf%YNwhW5(>s*HfVTH?c)@SB}Zo0jkN%RZI0ca*a9&y~)N_bHsc{Z~Ab&(oA9;9+FveG%oaZ>Hw~ 
zsln^V2>-)7{m17w@Hxv}5ZvbzNxcX2Yx3s2%kQyk&ujI@Z{Obcz+67-RHFw3etyGo z&FFjH60>i=y%W`?5AwdE->)F~GGIR<&pMg|_wr#lXV9WfZF_|rMTUslAnnf0%6Yo8~zkXv204@^eiD`Sy%MUUMv`lBAZqESp z8Uj$QkK%2h&#-qZI{A;^Hs?%u)Y#86Y!jnrXCyR>6<=7?BUeOCYow+aiXkqQfdp;g z43L3A6b;m745V3}_0sewR#4b*0o~18-@0po7K(&FiE;mRPR3B}Or^Gxt_+a5cik!7 zzMy4Zw^-tU1h`;HhiSlhhOQISj^9D@C0anb@C0&ZpGiF|DaZ=vQtT?}@HfNW4THduak|E`Xh_x5d0{Fl2Aq^oa6F4g8gs7IeNs|f1MyR)&>_?m=$M78ylQ^oF!&1K8AVw%TL~$&X?CuT zkH-tj_STmvOUcpH^QPUs#}%q39={!${?crGE3Z)MW?5ngTf&)P)9V!AGN%(i=4)J> zD_CZ+SZ}eLNYmjYEDCdtxJZbZ~M3Fo1(WNnVuf9zPi`;SIu)>8b)89K zqL}Asq+uAutu28EnhemS6C-=uK}Gs#ERl$b{#Z48l%+6oHvp`1#waM}6I6e)fB%Vq zsG8}>`_tRpsBc_;54gJa964~8Z!<}_!8abz7L9ssFQnvOJ?ao2#YJ9xPtI|emVFCP zt#yn1oG-(WwTsv+;qj6_;A|2FT85? zRKF~Wh*?ISH&9&H{L0%O%U@Nj_wTXfO@etajo zMo*sY@`g3H2AeC!;p1xgaXVMIU(ke)|H#iF9w)+bPvn?m60Z)CDSVW2W0j|a*LuH@y8?`G$Rf()m-vR18=FtD_bkc@Xe|^I#2^Lo8xgK#m zNynMyDWc^Od46s2ImHTXSx7^tGsd6)vv(QB{tI435~ok!JBqQ$#3SPVjD~NnYg+7G z5L^sa?mjSPwLbTW6%}9L#cw%TtnEOkX0M*8b}Ro0B;hnvVn^waq0TeXx}ooVv*Zv! zO7Az@0uu17IlJjzpbfGbDJcXqx0;{N15wX}FP)K7+0K*F%lX>f_@%Owd8dYQ35_-j z_sT?dr%P#j`v=qkwT+uG8|h(Vq5dd3yR=vRiG4 zf8k+kg?|X#5)4h(4pH?!g{bP8iksHzI#eWO;23FMDk0Orn{ZT=+!Kw>`RJdP^E6>> z-@tTNWJ(u>f0n$3jW}x@kr`+dRP8WTR3)R_fWQbUl#JwF!&023GH?s5oBm+!J;e#O z!~v!b(s3@q2leo~_Ba4tON#Ki=i$G0dY>%13hb70(n%s|q&1Bimz?AaEJe>JEr5;i z9Z?1UnJI@!vVqBJ1EA>FEE@365_-yrwcs_SBzCmKx@UK?iRE9xOZC$|5!I7*<}b%I zLKU6aX(Yx;{mxlB#C)KJ_ZMzsSqw)a#Fx>ganRMMD>eQ|Z(_EpK9kZ_LH^3TmPQK6+*8vcutJ>TFt^WYYDl7TFK0({$L( z{bKm<+eeBIgqEtqSJMCO)K39iG%U!{Az|ZH+m%sh5232IkGt_KRL#v_Jx3sQA@#9FJZfux4*m#naiN<4&2oad2?=WI5c zypG|rhr;ty$~#MVFqvulsJ^pn9`+(_XxnyKz+q(0 zOHwOK5_fQ#<(1m-SZK^J?uy4_d0od8ZZe>!51?-6-GdB#Eu4n1RhvwP~E z=unVsEwNWN0fsj@h)V35_`N27w*lzxG=f0fx=H}J$Z!=@ZbE>Nf!1g_rI~J3k+Dt! 
zwC3(zm7rV^<-H0;+h$v3mIuys^c{yj=!T|+$}x z?5|t$sU{9peV<9|GxyTg-cM{-sOuCDGOnLsEBqD`B;@bV=%;#gxmhRb*TT6ftq<)$ z?FXL3HFkSr!X+BNRp#bwOBHYm;WZ`%Eyjun;1B-ndPEfJrCu9y`_@+?B8zp7*!`pr zRpurbm1Phbd_Pate_4Hz_oGRakS&1p_H2yD5g8;pVRO?0I?u6k`2#}8Sd!c#)U03F z1Jt7P6x2wEKld@lRdm}9TlJH=H)mGtE^W%p2L7sfhd&gWVWdVK(3RQHJg73r8(>Jg zvAG$l6NXW^dwXxpglsucZ)70+w9DWp=CC_`aTOP30>x%nq_ zFPb-7=r$_ooooPI1xKejAc__cBb6%#1UJjS| z&ifVai2+|$P#y#RtKRv_PU`ui!c_;Pb22MXEkK(k|G5@UQSvA{sf4-V*>236#`9Z0 zc5C;}S0q5k%6Z9Ngu~f3T%erStM?iUyTW72h@CnrOIw$8;ge+)S)L-yyBjn_MVY9# zTD4%n$2!^Wq+dQ#=0ST;qHqfxoLW@grQ#XfcRI8{71PgSA94!w+1?cU zp?wJ2IMG|4Qu-d$r2XRUqV_zTNuND_c6(ip?^;&PBj!B44Qs}hXHk)FqwVq|t>mi) zbKj!6JZATrF(k{o{K754RKbotE!$aF9rlHrYS}J2EIb?2tzHhmn_SQ{b;-^fi{lRQ zG9J{EsK04{TSds8?j={BVmIK#1_<{J0lD1(ET6Sl7}qsdTMT^w=jEE)YN2+~z+X~? zcbzV4qAQOJ$h?9Syh#psZNN~2XNV$6oq?@m^$qjsu(ETYP3)bj}6yk+! z|L$x`SWVdB?Y^weQLmA)8`~DTZN%YYom`8?%K}@>g*?*SJ~{r(jNQeISW`XtX;!|o z94_q}b#uU|_$fHibk(+k>OpTI?h%Pin8>Ly;S>3S`WFN(JKG#8b4>Ldl~nw~Hl3Z1 z{V)Oq)9Ad_Eb-klZ?Yi6=hlAR>OubTz3?1VlFtI1Y9Cp zfCzUTLi6Bgu=&=*@HXM?a)-B6~&ZOtg*v56~Kw$9mi|2Fgr8`N{MmyYlx1xY{ z!5TxmWmWJKg8)G46A@UjLOG0y9 z+vxle_~p=Dcc8&0FO=zRZ~g?_%MS+MW4%5yZJT$+XzHbzP}eQi zVb;DwzU`QEqkzvX^4E;w%am(|d_ZsB2(!mMozvMd_1(WEYpTrTCpF*qmgn<7yT4A3 zuK%Mdr6!*PwlJDqLKJ+?X9|=cF%c1we#p;G2SGu7jSwI4ulPRdK{$T^1bu@c3MNN0 z@huf-n(r2XaD4Fc{OO_SsqQ{`12lB}weJ7>`0QopWp)7gUXj|<UW z!mn5RcLd8o*v3=Cf!gw_CE!^+Lgo^;2m@;7DRO1kAG?|urue6oq~un6o*-Gia0u}s zNShTIh#EY8wBe+ghFTyzKYIaz3Q7%B{c?HaEL=rLfXeyg0rj_}LiBn2yCU@zLc0uR z6p6|cNjOmiKjILP7&5f6kH3yrwJXn=6V^|V>?G|l7|UgUfJKB^JJ5c+PtJXeA=Q3{ zg5pvA^tsKn9*UJ}y^QU%-jqE_geHr+Momkv?(+yvVum-0m_|I@pnXx)D@9h}pHnXc z49Fl9&NhhhIXNpi^Ks4jZsxBncc2S4^Cs>r*JWdn5rH}0_H{=C!ktUsQ4x>V zq)>gqtR1(=$K;@7I7Hptvo=Et#wjVGubjO?K5GqHTZUt`_sO9?#U9-88R6$*j2WY) z@hUe#3$|aifC!v%6vY(mFXq7vKPUK7$cNT(Jtyez^xHr2%=!H&klZXx=zWG&@Ah-e z+*$4#>O8hw;hEV{79$P4ue3E)!!Cb2P8nhOEclsd>eymjF$Rkr7x}f=b6jtZS)SZ~ zC?aX2xGW+leS17e%24+_A+8EhYA`gFvZ;TIMf`Dz$f};*Ye#-c%Dte3IyG&x2B2?8 
zjOR3t7wnD*&5Um09J{5f389lpc6|`G2ivFg@?6QuTs=A}j6Sg)4gsHaRE$|}6`$9f z>@Ss7$Lj2ae3A*|#7EuuN&(g8b8LnZW$biQUsm-@!0Bw~)xlTnnGPsJWh#>R+S84Y zd=l&WtYPYycD6-MRIyoR0(hpgiHNlvu%Ue>2ctp1JrJYxfovE!FgAWC%$0)*Oq3^s zhryBVTK~uHKGo#i3%-HxmK>>v-~LT6d1p^c?1B^mf>-&U{cUrz{vWYxAqy^%cquiv z_g7C2CUYph+y_|e#DNJ!OfhS0jjB|}8pheiRFCvzDVf4S>}HCV%@%V)p2KX{slS6l zXtpbvP|)|yfCR-_7wvKPc0CTftqtGJocXu&=ljRE8-s$&#oddg$)};`^PhhvQ3bs2 zH$HVegMa!zOx|z)fmiy0-JR2-M z$KIts0|}eY=XC-v+i(~Dx4+(V6RLuc@?DtEG&3>S$Y6 z01RhkKs2(iR8(UovqZS&S5^v9jbiN7;@Yg4EB_OEceM@MUzBua`Whb->kbFCPyqx-eJIl0cyMj0cidSQpU=`2!0 zy>oplzIBkto-lQ9T+_JATKJr6wFTg?^o^#f(%m`7Y`)`tvAa4ZO}u4y^3=S|R*#Fu zcJ4FRY*&EBdF*AxWF)}i8 zbj;|eI59m6GFFU_NmBci;_qw901R~eXoRGw5h=A8F)@ik849u(piW9E1NJC9hc;C* zx((}p41m8L>ED4ix#GnoDNYH9O@?1UMK8`OBCio`7GpwL$W&NUTil#h%*-clE-LNF zCT$=p?MtYgP@{U`O8LsuJm+F&e=y#1q#?-AimnYMGcosY}1(T z0d$Ex!4F19eWfb<-NG}oo1HUGWW-a7Ob$k?`Fe3b^!+;9?&}_4{CmwA3 z%zbKI+=V|XoqBKM4NoifgojIC+dSgN=K7g8$qK*e#iFPi(2bF^j*W|Xi%EFPrAV5k z42%DDMgF?f|KYaNDC%pH{=JL-Kjx`42{g4cg^MvN%IF0eDbX?TEo4-5WD7Ch%A=I% zF$&27DgHJE&ohH*iSDSLq3N^fnjT%7N` zA$B9-J*wgUQYwDe1Z0eD#F!*JOyjUMNU>9H(-61b%{HszMD9`EWaiRP;VQRN=b~Y> z&Q`^ye^(^S=kDwhSFUNKf-%;Xjt$Wm3~$+G3}m3KT__Y)ON$wciW_59#!phl$4o`X zPj&w5g3$iD(tllT03A6i9le;jn1p#rx!kr|^jOpmo%~lD^@{QsrK&M8^)MZ5Om%b{ zbv-rla`lhO*aaGt;_r*#_cM_dlpI0ga6tscaTZGtmE~^|iDbJSX;Vt89 zdy$6L=}gGQ0cq?hXIx`CIGU-PbXJS`=FYKClQ-isjh*y6HF>>nJ&!*k&ja7UYZoVu z{?ned>pu;z{$I^Yvv2gk>})CnKq+WHEFCE2_VyrJ|k-k9@!*a zi#Wq;)E*I-m3r*YYpeQMy+@1!BI4vRaaq{Q7Qc=s-X00=Or)N>M$x-RbHDnmE z%$g*NE5;h&bTbA>e~RtFd1ZLgMXJZMVi_^1A4H2CKH%b{lhNa(TPZU0P0o?#q+Cfb zqfgeC;{3egn-X^GV7)fiP@*lHmxjZ*p-s?qYi2zxqta2&Zgku|=TWjN)08SeK+ z#r`A0&}$}7v-gocH-n$!ad{6k>IzhpGsx&-e_YsO0~+3p6^DvT#wy_CFm`CUwH#T@ zZWKli`XaU9S+J~|gbhl?D&rI}4r#@;;90(YE4r0BPM9Mop_dU&F`%(m;>~9lJocTt zPC+}YK&EK6j@wCJ>A zG*2{SG%D2*)NRzpROy=Ta0M^l?&hT{0&WD^Hg_@xHDc{?))+ms&YHQcoJV%*2OTqP zSSC$629aVxI86)|x)Ti-{s+1Y^;%X9BPI=l7_lWd;|%X|D~ap0)y-Pw4XY;c zgMio#9AAdtIz$cJHj8N3U8=7ZC>z+nwa?nPjT(NK!wAb6bjIL&OlGr(7sc{q; 
z1zNo%H!BHWzjten-$Ob z29)Q_vTt3sYgw^q+OUY3RLipFoVSgD4^V!!Kn=&|WV5$enP>%>vMpKUPWolRa3b0Z zm?TZoX3cY2+D)wzweV2g8N2o$6i;3pAI*#wG>PIy)}m|>b?_lM;T_FPdlRc)qTYJq z?aizeB#0VDlAwGc_Tlj}x#>UXy;SkZoV4Vuv*YE~Yi>7lT{(!ERL=_B%bax2BIM+< zcUW0(Z707ob)7!wnH0S0J1CeW&l=`bw98%fv%8r%Xm3_6izwgPIN>_IHUyn94;ohv zut$oc#1iB1PPk_rGHVIAgp;C(5=ZbLdE#v|&l#r-SVgj+n3@aI#JS>KFoTT?2mB&o zQ09p(xF_5)_7~uZM_QMJjavtZB7ITri50lB-MIIiR=48@v?ISF91@Xm^Sg0xIn8V* z4j4z0qev2el}4r^@+|k@zHk!$5jH>>IgX-9RLiU9d!90*PFK*>?;c5r!bR-Bv+nV2 z*J;T(dO#)8Tm=VVoLGaq+D&V}e$_aBKqt~2<&>C{+fH-&sD9MAX#gj(24!upZ~!GT z2c^?2zkba)VZb2LALZHHxn0f^w9Ptio~A3ha5;X$b%R&SJY-%y&YgY*z|^DakaIcS zn_DS~9fuFE1#IYbbly50AJ2`Kw8?%-?*Y8(NzmPTf)1}owi^a8Bg;@e@$tKQ0nST? zMvPPX5hK%4@_79YF2@Q&WhK)`0jheX9dQ~N2dsVO`Qx9{!2nUchz=xIe1NwTei8GW zdCG(v=$v(U|LeGZ`Z<74&!eNRh-irJ%w^@gaoD_WTp;}w(5C0mN#w*sb1Jf(IBuLy z4v^IU+OFr?{>x=x|GjgLHU~nwe(g6schE&+pLxMJS^6MARxfQVQ$_2baos$8Tqa!| zP^JIlTeZ9PVPg;MC~00a0-+vfhn>sHS>vF2)i`JRE`VB3VQfc}d*5YsKW2lMyW_8YWL41}N&~cKDrLPwcmk)2A!0XOcwY98Sam&W=4yeBykgJNjCU9d~Yw0j%UY<+9U$qX8ipPbZhaD2WkobOidbqu7-(iv5t{mn_I3?lY zcko%i{&wy>yjz=+;8%!{T zq_lg+UcLvUJNyH-b<416sXS$YBLCJWJ78I|`9afwW!W?!Z$rS>@AuQ$&B}#qUmdMW zMzPb(VWMKow09n&0IomaQSZ0*LDRZrV?RP(sz8;W#&h+R_G#0UW%o2}UVJiTxV%7? 
zf9u2gHP5l@*kSWDeqNKn;meXTiYm6lGtqCJ9oL2T-Wtw%m;zjW4l=;ReMb5I!^~-S zZ5YTOj!h?sRf=7UNg$3LXORl{rH*V7rf_hkmP2+Jo8s619MZMpUR4kCu+gYtkm zLs`bG=Q7Ic3Jf=e9EMVW&PKu`VJ0&&>njhMg*4qe#u|c+L&W>UOloA(R~$47srsQ5 zRu0iX^1XFM27sQ^$fA$oks0a$dIC8E*9nlXBOn*f25AZP8;%H`8_$Vm%c(CZs0i|O zuSi4dQ$4AXXPNBqKJc-guWZTh4vJpfbo$aQwhwpkaw@)l~ z9H%c^5`8y8iXRN1yHcnyF16#61~e*86H7?PMKr<~P_!{v>CHSwvJ=}$*RT%4SknWj`z=q3`ri<2w=^?k{Gz#tsjerwHNh1d`$s?ca)VMKR*!Q$Xt`adxsYMmS zvr%|3T{z8vFTc=K7{k#TS%tWmlRVc4=d4KDN=#)EV=BZNhI5aIpYmmCE0$oy50m`@e+4u4{VbUn*^b`dz)9!Z#3nv3}E?salN$ghO4k z<@b|@4_r%+)j2l~(h_NM7l;Ymz8$d{7XSJ`)OIBb*vLYko#rpGjd`4lu!L1hzpnJH z;_{2bsG=Q$Cssryk}7^JtH{3UV~O~7YCqRe{+x0NP{uM9K&P+XQQ@Wus$>;@b@iTj zo2I&{<2}e@Q#Mb27vs#O>sIrrx>Y`|no}(49pl2!XVo^38Iwv?rYlk(DvwjgFJm>> zXTmFIl`^j%LrZn1J5}c_w^LgwZq%+JEhR;4i*t0=+K(DzPd%n%Q?sa?PyyAjN}AV> z8L}#x_l$i=4X2A$!>Kw`U8!>sGLIf>)2?ikGjAV5NKK`yQq!oaR@Sa^QK~^lRuh;F z93V~2q3cxhs5(<#scIBAuNvb_J*Rt1Bc?-DBdO$9X|HjS+^-$`lKM=Spx!fpnQBY7 zr0!6@uGU_1Ew^7j#*(^wa{A*-QDA}H#K%-1U74CfRkkuum5V}&&VJXJYbuqVQhT|J z^nT+Qc4`%!qPjp@%;Hh4vIuH?E&_(g&&j#ewX#pTil{VG~At5Jsd6#`fG1DlEYjJgIP(}E$g zcou>hyebw2)2<=t_-FzkJC&{6Y(^!Ed=bYyhlSO|0(Y^qmTB)0WPB)rE&C6fx+3`1 zdIi(&A=vnMf-LsmR%eUL)QG~n;S#zAHhz`Pvb(KA#POX3_v~M-h!(j^XTRduJIs5=?%1`mnGOA;h-lpp~<%q8p@EQ~6wD9j}6 zDEvv-P+0I$2__vneejg#1CTzXD#SMAG=wlDD?~R07@`_7{~?wwq$$KT#0KmQh5?I% zHNc!;kKjoZumTtWb^w!r4Zs5605Bw2ICy{*Yz#&RD}!0U&ft$=doVFrU)fbGv2QsEp0fGI$C-r-*mh?N7`uFSVP(N%kC1jjP0A z^%PGIJ+G#F<+oB-{lkbU`5bEAMczYqz>(fUd+`z$x?l8oW4$}c5&uGaiL376*D3uR zU*3E7&8?&<)0|0OUANW~yZM#cCQ-|>DL@WWldfgLltGR^@3T9~5zzuqv8$G4&=qgY z7CjO>p4la6iarMgmKd2A-;MJ@>ku@RccpiTnS;yAIaOkng<#6iao95CDZFfs z2hu~NO5`cRb>?p-PK)_@(t$uoDp9!rF-M>PmA^b}CK4;Y-P}fTq_iT?fJ*S76-*r3 z344$9i;$n$%j$Aus{sropa?+-=vSrn54D7yMixj4Fq#`JZj#1*&rS%IWQ`(*ro-mR zEHQhT&w$N(M-71fRDqL9Kx3)_d5MfTc6@WzRkOHJEub^Ufgsx}@~%8ph8^dMN#mkf z)~E-Nn2Jx{1N6e!DgQfTf(2yDI&Ic7`T-bD6)T66)isC#ETI~gOUtTe*0ydOGiw(aNEzhaW0bNHUx z;sVIWn?F`G+$uf0DYYJUyP>W9_TY_je8_T3)VdvTOU!^Xpt7%3EjUpdR9oo*YfOQ+0hz>{`&96{P3T{C%zBs;()jj@A7%893a# 
zWWTRFhWZ`iG|gJQw_UWeQXV6^D0A>oFl_`HL^(bRZ9oOjDqJI{#D{5S9&#q9M2)#; z7P2m<#D%%{%JQ&!C>&rxDcGcvs7*Ukjb)*Mz(y|)N-dm_>3+9qO@xrM>h=D0-u6dh z0|SGNdKxZ1>fU+}XN{bCryq)^SlD}93}f__nr#N%KH@9LvDY`J@6{%w^woZ~+IMVP zcWg(4pwyiSnb3EG1#KntKCK)I56CI}%ZlU_{$;yz3jZ>ooWj3MnyQQfsG2-V`^aBL zQ{|DErLM%s)uy3QlkZS-cE$@6Zk&^y{jwV1(LF9WUX`VvVklT=pzIJ}a30d}7!ETy z=zfie7<*&$=GJP=F?lw_-XuCdfg;c5Im-Z?|CjLt=esnhMku(LCZR!9nSo50FIU$; z{Yjcfw*GsY*lPZ7ZEUOgzqP5Y=6~5AqB41V@@h@4>2w#G%GCMK@Ggyww&5@>r#OqF z=b)K+}^>Fpj01BFj3FpuJcJmHk?jfmIRH!iWRu>?3ci}X{| zfC;*h=~~sceCOBX!u~qXh1&&y&zRzE3h0OyYF4$aw9HOe@uQ*QsD|Q3MJ}Dzm%IGC z1?Ss^_|xbGEf7I(Hi7ElOz*psL(rQ=pn7{GC>1X#6CapEplWw~Q9NW=HgvCQ8pHou zwD6EnafLd&ycO$Zz5I%5C^VQ}W+RblCn~LYa;SQ;iOl$_Aloa;S4T=vTsDoX>SzWHl&bnglE^;``l(}v0QoIVB92K4nDN@jYRPp*1qF5Ue6 z(%gL8{QUjge8SxP^xS;i{QSk7>WTNk%JJ*jZGeZ5AYXT0&gQhjg=N`&)BN*)zkIhF zhvDtuqo`S2cKf3?Jy*q3>ALKNoqQtYGIQhMHIMm6$TUp*&nPV1wF(3mia_@jZzLPBDg&|$~wf=evVp!hq!O#fcOBaf!eZcCBcKEf`I18 z`oIZ62npZE38foU1Mwlp9?k=T50Yk}G+4jS0RlYz8^Sl(AA-WPsSk4Nb5K|x3Y26} z5k#}ua4=?{G=#LtWo_dw8}1RHarbDg-41qHqR?7SRdt?4K0~He0hiO=rjbKhzJu+~ z+Qv$!L^@9vn8|6qS}JWAW5IsDP@H1PdJ)+y>o-TOtp z4f%3={=sxvn$tNMOH~B>q=4XV76N&W)n>+pp!C^la1zeZO8(nc#>rwj4w1v{M;%$z znb79164r4)=DFo(DCDOLC+wxu@?=MuXV4 zb=fnTRXPKVeQ(!aJV*-~JYS2GnmvF+Gr$!I%_8R(-LEazV6A&^_+^f?w~Q5eF~HGF zQ8*P_t#a96>*nK6)UfHRN5uoZLLwHMA`cxgVkdW8#!`nd)nK{CEw4zu>#YD`0T>F= zZE=IG4rjBpFPfP>_*maAyfTIjdlUv~l1lpOD7py7lB>E3F~x<^Op6oAR1)i@DCtB; zuv9ciEtvMIMP~w}I-m;y;nku#%QLs(w?xBC(6ON*;vig5lL>)hmuXKFC5VQ252_&S^Lrl&TqrP&DePrhSTKMv##i zO{_{q-3Snwf#j8nJYps;C~}MYI{%SZx~!0y=_Z(2A3#)a-6G|*9l?Kb?c%pjOvfwE zrSajTVtM0+AxaT5&A_(tlG)p2yK~Yj8}`BAG_>IY9hK_CLAP$@klhUBY-06!5qDd(8x9!g#A0t@9wh4mKL;Z__zC@F!85hdu4}yNWAfwWJmS5rKLj68;a3Pp4 z8?Tn)b%sP~-m9oCcJ>=~QLDY;6PAiHFo~Sex>T4h5tG>ZH*_>jr+M8`Oh_XX-?l-| z_X{wT*)HiX5Fz>*nh+A-%LFSb1M)dKq;1PNkv)!%_W>&0YxRVpDpP>W&zkDUkBVN7 zSiaFT-meYK^gXRiS9AGfrd~x+OhYr#)lunPeU;EL&*2!WDu&wX?x?wBgbA!`ep*#` z$@K#RYY}N=wk?gB+8(7G(`TBXbBuu56W*bPPp-EMt2!zDtuco2Da5N^RB&Pp&S-Tr zDU%+$m8}-l0Py{*5j}Vv 
z#{1<-i)kk(WBy;RA7MI-TTVD8aScQ&vviBd!`w(q9!+HIe({d4t~Rx#f3=G@y*p_o z@_n5C0ogs&a5PMm^;~(rO7pwZQF_i^s;=qWaz#?`b&2J>+3@k8qn_<}%|q(Oo3p;qejYATK=wKNGY}8K#qgx6GBv@{_~DB(pVV zT!moF_~kikTm)mtyCZL$Xo_cT!W{O{j%Ugo;c(*kfHld?&~eC*aKyT2!kl4rq-Xv8 zbQ6o}ZwCePQ9HE5QWn)q69v)H!82Jf>yM3W*yJuf1R*Fq&<}qLuEYo#X2Eq-7nBrN z7mg7grKzFg&xb-hY~ZP27lO^uY^ZJK?r+F{FK=_t@SNFBf-bU{SHu86|(;kQ7bOE;_g4&*E_waT73AoTBcF*XVo?c2ngHvXG`dR*FD?w3^?yQj73m5#kkLjE%aA9a`xHZFm}rQU z$`mvvS$G_&GvJzZbS1BmE-*`yZ=WCp9J=3B|GU67GDKJfuyy zC@O#^*&zg!?{)uz7_l@8(hS;I9m++m@D;VTb)k##ZD+O0l@Tp0&>TybILT zGi4-vRCU`+S~6Q6W3(ZQUs<){|BURvJ#UTTUUc96IZ|hqwLooHNtct?v>A*j{1Im| z$SgaV*(g(N6p0sNhP5{e)w*40DfkA*eLmmx8mKy95b^l6v<}aa-lF87Oa$$-q?f$Y z=%W5yN7qQ#w|2RtBBYh~cBa*~Lp^j6A?BjC$Liq-7re;OV+ZHD2oz6Er0f**9pRL= z5i3>zLh(YEL83a{oW8W1EQ{uoyCJ%iZ!T5JqQejjDS~QlMFkuzE2W45nbMQCi{scW zB@g+}Q?7SC1=e(tMnS@^k_*q}1VpX9q8wQ2HSKcQ6tC!6k)7zu1(Uq2LKD)5C)4Z(^9!O=XN zKPu)9ZolMEbnB1$6&ffbn=>T2C9vYlnvVwgTbXSAtgUd4_FLa4G%lqOY8a2rvYYE>eM6&8D?A!ZtW{JStSIW9;FiTikIIdwO0FNgWCWTROT#Wp1 z_3QVAeP3-WTnPrMQ&1>m(0aXNn{^y<<=U}iw!rlD6wtGz=m^QLfc#OtF8xpO@2DKjmXk?JedRQ)WKDm}=3xvq zBwQxOBx|A|JVEg(9bdc`(N7T%eF+W61Mpw2jl(V;nLAqj@V@ZHB>@RNw>2-RAK=!F zBUbNLt*#E1ky3wI(|_@!|5RsnE3&fzHO0c?_bluc7l%<-Xbk9sgJHq?n(b9l)k#gj zxAN>__|{kPur+h62@wYOnk4J9s1tbp%Tr@?{@2~3#3Fyl{O+^kPzbjfA0abmq>8qK z2toRc8IHU{kz34o@rb4cg1KwP_ez>sk7=d=`-pP$i$ThK@t|dYD@r(x)RpiIb@q0c zuj(;g^G7!m7kdooO;iTH3MLp7q!DeQJY0ILWRp=y5op)8w-g0I@Uc$O3pQTJ;NYJ| z+>nS2WJ*sdkpiw8r7+Svcwk&2k+U*r!ziILU=2#Q0!m$+a7fSGt$K`*;;CWT6-3C_ z`;Vavqg_AG3ncm73=%Oo2XUcSomtz-`N1WvPWdb-LqyZ&u3hK3GyaYCYiuE(3(K^y zSjZeoH?hQT6BA6=B)>M0ilIimw>@O?N0Z39?Hm}vM=6jb?Juy{CqFXa4q1z{0e$W( zssUteJItzliJDbFoh4QdZ{o=c2lzVn4;KaMUj-QYH@?2U{a4`PhB+Jy$~TFq3_z-Zm^Y(v_+QoE8^yXC7rIhj$VlG`zwc$M!5qF6hP)JhPo8+; zCn?|D2oLtT9Y|O0eCKj=ZGV}CPW^l@* z))h(}8OkKTwv%597qHqU7P3=J+FlCRzhiW-zoe)EsF$`9AGRPgHPfL8fQHOM;4zbk z=zoKnAMTfq00Yg!m(5J1~uZl4I~7@{#(R6=^om_ zFc1){?=m6(OE&X2V*fKZYwAboieu%1_@o^?Fu9)Re+6<`lvr1EQgOAnwQiswsQzJ7WVIi_DHqR_@kRW8_~iTha(7Bp$kI~!qJ!C7NEk2Uz)=R 
z>n8dfY=UloDR%ff6#(~gFQz~|t_Kr5mI^3;^iFQ>GD!_iH7Z=6kR@d_9@ScH)NN);s0)gW78TPb6mC7GWaEbNi*%~6 z#>;|;qld}sBKJa4KWUhZrUYByx|W;}5h$ndEYuOwWU3$)+dNz%*s!6I%obb(!A_JH zPLp!PQy(g=<-sFUeJu#yV_CoyjEK0!#=?^>cV$-574}s<#>P@NjdWT=5Fx*64i3Rp zN~z(*G8C-|C6J5njV+Rf5FH~xcvo#KfOJQ8uZXONjH&n-*Smhdc@foiI2Cw9%k~ps z+!L1?NT*quwP^9f0iC?5;RO_}&%DF`5*H235n3;FgsJ=UUF<&QV}yE5a80G6W(-Hu z2gkK2>L??zts`dAOPHvF0tr_p4{P~9?i&KR^>k%bl9M{MGZVIca;+N%KtiWulxEJh zJuXN8=I<1q@T0A^^69wtuUwt#v8Y6)2BTSa*TkEN)VjX=anZ)2g|SEk7K3S&XRI)E zhc-pAoJoT&lM(8tv9W`cL`5q;4MHA5_1XR_)oQUC+9CFwVHfC41Ya9lZcBFwe43<+ zpnb!veDZvL*1@iwJL)38#vk4DkJz;G@{DRMJb$J?ad#;}mF7c(ZdZ}sgaiklk?mTH zx6fNF_ChHiC~#D1k{MQyXCTXN(kHdEmPVSOlEoMAvg@buBy-<6uY!*q6e4+Y_HLoe zyvM-YB^e!!1Q*!XNm5hzOoN;9F=l2zy}}fwoqRIpSIi?x4ljm>iFq%S3i{&0j$kX; zOxznDCQ7kt4LUgo&Q^Jp8?TuFYI3gNur=cjJ?8Wo9G-P^Wls6bOv8HjzGS!1%N9=N zm$oeC4_sNyA(mOpBDh&BwOq!x`VRegHq{0AgIt$z4+wuq-l&k=hwY1wJU5n>&%hS$ zYO1ykx&#_{rf`=m&f`d5a}b$9=cj=EQuA!zlU_?pg_X8nPvqlRH!aWH4S#B6QFOm+ z$aa=8?G6KsFcN@Xncv+BmLyHZl%ooFp{2{&EBbs{#ghrtIf5z`R1l=CDN=_B7K3E9 zjioq@WbA5lDLMpVW}i@n34Pb|CZgYKk4U(MppBs^QL!qK29hv#8lL@rcTYZ6Hp*Sq1`{1!dSdm1| zBh8{;MZ5?wV(KMBzMwP>Gx!o&COig|dlTE*Q9#q@fzd@h+)p&QFYOJj?myxErZbUs zG0sEu{C4SpOX9IDjVrL-@69t5f9_rjWfO!_vm*z#?dbV{`@28bWF~Gy2zu1TcO8X; zcxODC4qfSZW+?xt`)i(<2Ob(9XBXLrat2Qzp^9RHTn4lL{qv~(8~TcK;ICn$jx0#+ zp$XTOgpoZvv`sFby3G7AJtWA;_UyJiJ=AdXGY{@(KmM9Mm%ITht;Nj-)IAiigT1uqCdO@X> z@;_)!rMZ*+>Lvca#f!9k7nk8Z2OTLB6lb0F>LWf8RRf1y;Aq7u%0FAp?&x;kPpvz1cFx~Y&SXtYfqVrHvVUg#PH8`Z3D)jqs%$tXVO~?Z zdyLyMTSNw{K6k8QE-q-AUHz053~>1~u{Q5mNJ-}}B}LeJ%8`k?fnBx@Lh(N-nWHWq z=Y-03Vj>Y}LHG%1Tk{GlBIb@@vhw7%}pw^bpt~;(Ufo4d81w4RBRhLzA!y+pn?D!MT`vI<#I+M99R>}0 zH{otn6J{OhSw(~aQ*niXDosyu3S2LaqAQ>?yMwWMou(>P*Qb7dK(;uOx5BuPWWqF| z6>mDn%mT}X5UeSa5fSp1drHrWa*$yWxT7x9paf?L8Uy~r{f9HqQZjpmv^X?aSuSu1`WTTHH>!B`YiGjmjU)rt+m_b0n8d7>2& zBefF`DAf(JI1Dp4bE~d~;z?WwAelPpG8?cHeA=46T6*y9iigr7kznEVb+r3xtvFj}o(O3tR+t3(sEApD_^4Z$I$w9Y_KAil~dGN&(qh%qA0W_$*K82iW>?Dt~UjQ|CC* 
zZQqDC-@pE>4V5Sg)>?%Tmd+HTC(Ie(+_M}YppJs+%ivys)E*Gm!%!suwDX63s_X+l z{uld)=E_rwPiX!~3dMUsuZ*8tj6O)nABl|x>FH2@)bCQud)?)g;S3Jbi65z}D?X1MsztPhoKuG41*ILh{scsDrY>6W4s~+!- zpc&(G_-1|a+!cXX#gd;;QdaZIub6@Sn4pxl9WZAZHtUpt39%MRC4b!BL5! z!$zz?;L@Mdr+-6K#@dR`e+W^n{y{`7`d8{;HjYT{ageAddKBRg7y*4BKL+ zT1*WzJfSuH{S2EEVKv;n%``lz9E@en+@XjxaOx8dR{54z<(4Db$)GG9z2pqtK3gjj z8zVahD{BiQGmDW4lDw3H^z1Z*lGqg?H)pCM{`CRMNAU_;2T%nM?V!V>czZOZPx~Wn zg`Pq!nDhz^Sz8PEix5UZAJ%@lYC!!PLQI_r^Zi3Rt({Z#dHe+Z<~BXnYxaR_04$Pb!KgzMOX>$>+pQr`n6 zx-s0R)JFZDB>1oN?|9wxpb{1W!VBYnojLd$u>ToSwG8dB#PRY#8=b#47`x{L=jKEb z(o~9Xm%G>2I>|FhO0a|z?X6;7W^az(^?{C3JL$psbBSmq3!k?_XW?rE9`dPV{AS@y z{aBSr;yI|qFh$@Mp((CDzF2r1@-hX$tOA5`0lIwXkzVlrsS+>+-!#X zbUlDdxO3P&Pw=sZ?EN5&Zpp(7q`S0xy@j>CzW;#gI6+n#S6j8#3c=-Pe%(KcanXA- z@ZiRD2APD5C4+Cl>n>#6i}LoWYgyx{y}VlW=fa&%#mij0dLIA*VaiA42a&od(OP|_ zK(BFq25V#axq-}TNh@q*TQROE67mjdGIR{-#BwT0`OlJ=%bz0@kbrp3CKS?g>85Up z)-!al`V-vIgcJ_RUU~{aM#)jhVk!z00|W|V(lXK#duCy+5}&lpen(oe2z$*6KJQ!l z!~3GcHqj#)A;6npfxbMUQ&9xf&!gLXgk>$wyMn}&2oq38(qsV`W92`eP8sp!jP6&? z4ZesOFaIu^660a!j&km!%yL$D+b+eo zfnm-HGusn3Pz%S}6Ai$+d~gyZp@?CaG}Z@kZEa$ZDvCxk8XV!KTkrvGXd)sOiXYK~_ovpY$_*K zaYF3KYzs!)s2%oS!1-`H7lLu?-@KCoWjsNNcIX;8MCptlYf2V*b?)YD_-Yt*kmqco zG<^HLX2bg&-#DDSnHVZXz7VJ3;$Wwx1MYeY1|rB*P8G#2;k5e%AT<%WX|@O;+u*(8 zgYt0d0bA!EHfguI%>$iqWB;G#&O9Ef_6y*H8CgRiWGh>?F=XHO>{}_y&R`H>gpi_; zt?Wy7vV{^Or7X$5M@2+ZNsB0Z((ew{JLB!Gsqx1zUCYPEJ>PrIdA4)TbDtrJeN2#( z<{gxS&qX?k8(m~wLgZibHwYM`5Lb79&S9eT5qQ#sq#xE{>W!r#v4~0_G-?^<%$-?! 
zKCX1Y=4|r?0qTRR;Sbcyw5|>t?=8=AT=IXBk$Ny4HqlIOQukKLxJgvGSHZAtnaL-E zjjOX-r?uo3H*v0Dd zo2TG2=a$1=1^L5Ixk?FAMaWX*pL+Vpcn^G5{AFeE^ruUoWkshd{g|bX(E2N8ES`+0 zOXEk#U6(Ji)Jw~Ldr(3`W=K2{g?MebPcCM~n@WV`I;T0;bj>T_0Y5s1hPQMKVw3!^ zH*SZmM9oU-Ul-;*Tavr{Oy}IC5Lcn|IKCWB@0{ zv61GR($~gb+v#ySuJ7D1SO;&^W~0^NI5+Z~m209#g}` z7gvLFm+HpACvNavRJxwZnmsaS@+!=HPA;cU5&_obYIk{(fwKRO+yDAHpOzzQUBc#KjGpGW%GRD^N&iiIopaooXz|k zT>^df^QkbB8i(U~+ooCewhsh$21xNV^0~!mu^l=Ww{Y(!W%lHDrOX~gOGD3gam3tw z&g#X)uQT)sbx1wpBeS+ql9kK-Z3_qd@05Qe4{)@yRCv~|_85N&`|oxT$@weLGGTF8F!T& z)&jmRIy+bdEZ+L`T2h0J_5oeZS{1@8xu#yWA|@+##{*Q5PI+W&Wx|yrz-#t_~_bBs#c-e_-p%&%u zR{SEi+eUPnp46|}?Q)*qbwcP!TyX)tQsiC%O>fPmQr3_=8TAGqcwm)pNh#z=wd&IA zHRLp`?50VZ$<>`H14HUsyk|m41{!@L%VM8Tn)zkUeDT_?9G|S}{!qn@al%{^K3MkH z$r3r{ew5Xdy=jG$u1!vpyRf7%HABl`;T;RZND|8vh1J2cEmZQtBGTfgNwl9SXY#xD zC1s|ayrniYO?j>I&FJ<8KRagy|1kNClIL?^DQUIn1P%9adhJm1Jkb;~0nQU2UET|bb-KgB=KqfE|*~~iFe(Qh_lHPQ;AP& zUmpyAD`l0bcfU6Y%r#}ySY{i%QJF5;i$_&ACx%)`f9rO<;Qk%cH`o zE?6w2MW2nPEuavE zxzlEZu-T5lR|`JgavoV4*LBb~+I3;q_Uj!dS_qQFPnRGGRs<0>!DK}mX~apWFtZ^f zyZ`6@9?Rt~O6;F$*MOM4=txH*>s!truCzUt%hRp~yo5Iw+(DQ+-8-Frw6~|OmVRwa z_T_=0PM^9XtZc+C=MB3?!iXOECv_Mw&k*+?uipr!5>RCw>f-|1Yr* z7sqaN?BmMc#Xf?TXJzsTuvOnH*;Kei$pq&u9x2cMtD=z%UC~&o#HXE=cGqA<5U8W5 zTCbxp3xiGY?JGUAQ6J$T4M{jcFa&;he|5W)BW-WKY$1EZw)k#!6t4o~-tKb7VuVzx zuECcyLxV>0J~Og;)*eLRngQ){(5g)s>>A-A`2}+9D8<&kd#CChM4Rq&^YSmMbRPP&%`CU2APkB04Y)i6?{zHPYEgQVE~y_a+O zY{*kFGN+iFy>uwL9ZpIbYJA1jFGJ3qayE7+cF!YZ%4VkCVMIl*QJ4e+21|3EQSKy- zbi2j7DH0#Lfd76K8B1#Kqw9=?c*WqV{wl{zAHG3-1j-=@A51SKO4l{oehoPBHR7d2 z&)tu#$?O%M2j3mc&*y)cQk;_*m6BuK{iwa2sV}HzfSNPcDpK~DgwKbjw}Au&f={J{ zj4rcuz8X%gwON9GFfPp)UWqMUSZQ!Cd8b}cyPf740a<{|GUE~tK|v2yMxUtH2?0~y z;t&r3sX`}I1w^G^SA_dYo1*E}Ov41|jN7CJlq;5?ao{SdLRB`ooAUHXcM|GB@AYK5 z@c~(%rvqcUqcgU}iRSZFd&sc-9ju%KB=89y2 zPWO_AW*}dScqF?=?n$}RSTEUM5QBO$%O28TPR`=Ji*Q*g6#9zgR8NjeHe&lTmM8r> zDS{%p=Y_8YmZ_~26b0VqHZGl83o6@3P9~*M%kyA@vADS_?I?Rc>NossQk;Aa`r~!-yH@CAn47HQ*52_w$CIUn;mBe6ON=Ya#Cw 
zk+Q1^bOu^e)$w`P+UZJ7L2r?c!#lbk@ul{)FS9f6Z8LtX9ezFrB4YqkIBLvZMCU>> z-xlsV6f4%qxoqD_-Kw_VxSXU1T_-xgpYVo>);-%oFqa6{xl10X6D@uyc|-THk3zpk zTqVkD%oploRz^BC2F@3Y$$8VFQbjMnV(WNh;jQ4zs_lx&ur5sm;-6tQ{ zsC=Jd;&;H|$ZBuJ(@?(*?YV_{DiFzdEo-H1J-O#Z!^b>hn#&(@i=5gVcG~Ku+YjDG73>uB?)Co*cY1lQ;5&uEfO4>0G+GD&uh~jRoNO6@bywWLI;wx(s#pHGJWU+D43CXh@mWQZwBMg$g z6v`Ol>^ot$BFSJ7SyIUd9dPv-ki#>==j^p$Daq@U9GWM22o;xB$(LJJJ8pzpqI@lM zMAT{RLP>>Sugu%QpYG`9ycWAG@Wk%INS8>C(Du&0$2Jt=VgrFG)dLMrf||=t5wysy zo(ol&8c#B>-_A#_7zR}zVZ7_4c4_q4{SLYA@m7r&;2DAE$1NL7B}T(0a~_9>`_$&}EZwJwN|hcw95}v#L%{uof$@>v)*yK&F9< z-SKNK=SYU?0$j!<80%<0&vLodEwo*`@OAU(V!wOm7OjDuw{T$J`D0+Wq5pqb16QrD zfgfq>X~T*7no1iqE@VkQ4 zZo`d2RHIIy3iY-=x2YpX#KVjCb|1{SyT5X0icUXc{h61dL_`zH?7`_$^1KN&y5^Fh z`Unpvi{he&zp<%`7i#002x7T#0tCCWUt&O`W~hq^-UG;C;gV2L*{L%yfdo`6(mK zJ?|)UAUVRBset_o4IoG~VSf1f=uQS`2?VMJCZg-#eEXz=kIuY35J))G#m&}vU1csn z{Vw|2RKts&ifsr-&w??9%l@5kq=&Vur;D3|(60=IEw#n-Ejd3R8hz>FDfPQ}L18Iz zC=%fKf#LJ-hgW#)vWw*VUjV`ZHwDofBH&VnPO4%?p1!dCYA#yFCW0m&O4w6Y4}^0I zT5ISkXpJo*hT1y+v$N|bu(nnpqT|G1$&C#`KX(|(zhSZTL|P-^8}>qb$WJQ0|9N_} zi-vxsH5CTdO7_5|jEBT^xq`w_PvoyW6kTQB4WwXAV%tFkkp`)4T5%EcM~>KAXgQrP zi^({f(RZ!t#z4;V&eb~{f>xB>%a9`9_f17yTq(X&O?BL76W(y`y270ZqH8JuB`h(d zCNnWy2HI!xF^1iGp@psdUAkI=;XV2hLRx~YZ8~}aCt=CqP~)^1lX#p9?VRAZf|F0Wo3t0u91@{&jP)uXA5L> zsHt9tm5_aso?@#)7$+w-^9E3vm&VE8c46ZQSTTEN@I2*BwNvct$e{{*0V>lj<{c8w z?RG^=QnPz>wHe~8xziPVM~4HXogF6PpdX;Q>gkE;1W|@$S=U#-W~Pig9JA50w{drx zy*5@C$kv;@O2`yV5vU^1Pk_;%pf)vW?%x^9)y)p0%YXyUfWH3KWe^au5^Xr(W(g1w z1JESU|ENLlmC#@9tdZ7$Z$Usre|Rf8tXnC3zm<)*ttG%h6ECCb076982@zc)vA5GueWJ^vg5z;_YT-Wqw^=v!?Oz1}=2MqDk z29B15?#@U+Xi3eIc4bz;J*$CB=!YZ_DDP}Cj*=W)+-$5}1Vz_d_pgNhyeH%OAr$RM z)DF~#EMIh&tX z8cQ&+e)t=~nEv#i55D0~X!#mSxsKL=cliUC>JRy7f1&_(fBeORf0Ms4guuQ#*?*F3 zIN9bYj?@nlw3EI4JCb#SY&7AYjM@g;|1>PL_~EJqz2M^@5Go!Wy!qZ)q7`8Q6tnz= zA~wWZ+Q6aqcEC6av@{$jl+2?RZ2?ojfeV}B7!q#^A6zL)*V3ft00L*g1+gh4z3`;~ z%FuDt#VZHXFbRM{=3lzVKZz%WEBc^f+$>U6qF$&85FGs%#Sw43DR9<>v^RCO8qmQ% z11<$D?_~19lLB2~kDDs?1*x0ZDS|+;Kr_+Cf*AD055iK%%NFhm7lT>CpmuJyxB_9H 
zV~yEn8*XnQ5bn-`eHIrcP;~oN0^#g8*oQ}90=af>9nktwQn>jL_K7c;Kw5oU2ef|v z3vNKzuLHpZs{OQ;K)@0bHz4fyNMHidFW?3A?X42HQDDEE0h8juBHk20W&<}0>^;+% z6ymFRQsCb0VQ(JBgt+kqFNkkjiE&c}_J&+ciWh5mQ{Ze6?7dx>6l@Rzd}c`-JH2pI z1@LEs1@Wz7 z9yfzv7oKBMuu|bofwMudOZzY>lDY7$(F$Zag5c@?pO+%FlyqDp;$k1s*&|Ftbbm$R7q^WPgC512CTZ{tD!K0_Qg| zn14b**!d<*qfFsR@Ervs`}8a8Y{bXVVIk~<5+>4RUi_5u9cd%I^edX5`-&u%iFo+_ zg60onh1lsOOkJelAssW>^m|9$oY&y~p#^lPPF>(PTENJo0J46X_<#W6J`hL*_)uj8 Lf%1Vnf1Nqm16&EY^Zx{dL1POu*VyNe+N3Wy^3jz*yWv`6;k2$%* zgMdOjf`WiRp#Jk9@IM`n_CJT~yBO%0SsNO;>M+qU(lP!Y!r}a9aMm`4Mh^c?P`v-( zBHsTVl;PiDVgD=FDgFcO@Ba!`$H7t0(dggN{}t5#B(VPqFbv4Q(2;r@p7j5TdCNaW z|KCJ6v9!_Gv!rFAb8!5(8T?O(v|`64`uS0VN#r4PdOE3Tf5#|?Iqnxe(k+!KX$G0u zn?pMo80XRMRY}8J+3iaH5$NR;SSCgjzJb(yf#DK3V=2Nhxkui{`nu0%mb^ZaieMVx zf8$1pl_@+fHs~+|6{_>Y-1~z35r*e6GbbI*M3{E0mv7rPX`lLtlBOd?dr-OYHFP9l zeqATv$7q=?_w}PwIdnfV)L&|T_9u!AoTBh72&w(j4};(7Kt&P6Gz6Mu)LM$`h-!7! zJ=PFR{1!ihu&ZFi$Z?VhUa_n{Tqr@C{ z_*P0x{TXo}dYhxAB1%kGyWlpf4u9O_;G6p|ORk{Se{2{nyA)p8c}jXyJp^8j-1gp& z>s=Gxrg=)@J6#*-2lVe2EFgI`m_9W+Se_o5hky~gzY;{Im1zEkRfo_F(Lee@vj;Pi z-ToQjEh+(M;`0Xme>L(S^N;kun#zB4V`lI_)BC^7!~Y1+_}_*9H+j{~ft zUW5HF_4+>+EzbWl2mdW~{+pGI{GXVr>785&I)Q+s=7E6l|KCvR7;w-T8~%G~!}|YD zSFca#47A19;ppj9zT=Hn6YBBI&2}NV)xpg}nQQ_6w3o=gs1_`nRgC)8C81h!ax|30 zgiR^>`m$1aL)xIdTmGc_0zYetJ^|C3ckf=Gq}x8))g{jt3l3TLJlDk;%-noFt}4^) zQS0A5BuG-&?F8JL&a5ZQvl>YJ}ge7Pa(2h zWuHPuU!Fx&_evs--bwiHS60^(noMR}?G{^ZY2KBscFUyu(O!0wy|T38rFLXrkAp`z z#<{oQtkBo$U+0;rlEVaF|J=&fj>G>NOv6#3!}BStKh(u_t?n%b=ADjOdKzst1fPKtT!u;;PSls3GH*8(ke#4ZT=CeuRBm;5XJj8LxMCEEoa68 z*NvRwvEdy;z#;MKK`If9ZMtG}wb6fjY!J0e*A^;EHfodfRs`n!~v+i-5mD zHV8?P{w|e=cUJn)FUlLqrc{#!dW`I;9TFrsT$Z2}UOaF&8<7uTKqgXq2DcRxscFH7L8pFvywC$()FTN=KlB|+*YaZM-+Z>ki0Wk{K!8Gxu$e&a!CE`V)_wOk6Mjci4-*W zD~OL)pm7t)2cRw>~Q3L#62I+i%>gH^yB!Gn!H`{dLmGW-dVct`fXc8yrtyH1LDwCDJRJy ze>t_iaLDZXaq}m!5j!L$F;&7PLUOu*z1nmwTait{&Svgqbpko4=!T&5sY*ijgLx2s z`Bcb^OYo9IQ)%Sh~)mw-Mlx@zEwjE#}B#Tb+uHA;^D{>;J z`1@i#K7hp23li{S9=1;8ZRG9PP0_g(Xuc&wteW$WZKVCC5OV-#XQ1!oJYW|AnC=RHEn 
zDz;d^_L?Hd6r#2ol56F(Z_OyZQ$YJ4cq{CaC;~h}48KsUvj@+5d&vk&-$sMWh>;yD z-EaI+C20Y}4Kec&2Jfce0=FHPhbdbzd=nk5Wopw;iybc{nfHoDS3@;W_q~{+B`~cx;0r$Ik1dk+n*=@7( zhGvO*{i$pvN%MsrJ^S|=uCV)HnQA}uxb5c|jLoXCt6>;?bNAkQslCltyC?+)3`Ft% znD_z}aX$CW;IzRQvF6(jGZslZMf$-+t-Y# zY&i2{s19o%2gqa&#_)}@gL?ZUZ2MkB^5FMpBqF=brfUEetEfGs`GG6JC^j`(E}%j( z<;CyDte~iuTZZnIIpr5-t(bE2lbiHgndqI&I33l32pQnucDcQvNbEBLJM`U9X2!Q@ zyzCyOd<3ne8t=2Tw}aA?3$t~TnMF$2Id0{hj!?D=LwdFH#F>(O1*72*>KE^%TAt!@ znP(EQO+!jWbK2vFrO=05V*EN*_iUYL*}@LmbRLEe=&QRZ-Y?lK|8;MJls7*?uZIxjTi51}WyKfvb%yZr_0m1?w7+o2tXAAg?V@(WCEE7l zgUHb;Xq=*jS(RUY ztn33~VV-E$%yHb+b?$Z*lwDg_#nJiOd-n)d`FV89n`%uMv`Pmx*zS5s`mMP8Jg{>Q z&OxK%5sW2ydp3RWvT}#MV+Rn!&ewfBZ7VnVK`jq^=uSim$rM-HfgCl`bk=?mfc-U$ znk~UUU!VnM@l_*MT&eDPTr(qFm19a;cXCoaq+xGed3qQ4%AL9%3SFB)&?~aKR5mv5 zvwkTXGqT>EMd3+a1swB<>fG^v^GyrN$`h@)4}S{@e49-(!PxSeqUeVp0VZ%onA$DP zBTV+pw&}4e!#)feF9ri>yep`D=Elum?Bh9AJneQzzdkG`x$BdEwN++!I-ad|IAUW_Db8CS&g`4aKRTDLOM)sNXQ%pdii`FOFi8P^l#nLNO z+)}0oYa^TN6W4h6gX&BP=XI7c+pYaFq8Rgwz#@|S;v+=(SSIuyC^)Co2K&&2nW45N zwk&Y~t?)1?oy*Q!_xe!6!q~M|My9(UB@oCn2LDTEIK^Z{7L7uTeyIOHWz+Q8C`oC1bJK5EaEM{zqMr@zAJDH zDq<=<2a3ZYBHiXlfo9!nzj#a22?8Ru2N+Xqon?cesmQ=!geFKhacEz!xP1P`BH|<$ z5$MxxmAVa{Gt{-n>I6pB50?eV8T=*IW-lYiu|UXFW$)o6FRK$}@l0PbRC!3t){^|i zZOXOm`=xEDy;S|iS)@&?ZfN`GHohq2>lQ_t-eVB^4_t&G6B@gwsDh0caheLI4=UNc z;asEZnRrE(cZJ7GH-R4Tus02H>=pQv8Wc7D#{5+WA)`^`nW4t})&a-SspQDBprx9L zQT9=Ri>zo_7HN?KJ1_m@2LeaK7J4TDwiw6zkBYYC%HstwnRoH)uCU__BA55~k0XR- z#6OdR^LE7-`bRmw}A&F1wF3sTlmDBbgTCB zUOo*#Z6xKR$?9d*vmR_tB~wiC-Q0*U!10zq&6{UxU>_iSQeO_Q+<|KO;cezL3+Yoc zU{-Xoa02Jd`|UJ1p1gNLIUqVbe;y$>IA4TIJ2Q)y=0yZ@EwC71KyrK%v2CDMpRjd9 zLuYMud+)qbM;gY6sju0gPZ^4Qg`}cxC31~8eUw3VuZQk#ZG-cDDZ|tOanDcmQIW<0 zk4RZDnyYInWwTz{OE)>Blv8%d=pU708fp+o5W5{-|79z`!+F;DOVI zv85Z&nxAlEZyC!s0gSw0-ecvr0MA*eI zm|3r?n&poL#41Jj_iUVwW1YHlnmGXywa&0`r4X<>5Qz3{irc5TOogNMO0=~zE>q5~ zxk$H^a);dFaF+{)Gl#P^-$$=sH>iC0PY-~PPyI3#>CP7oXaV5gV9Pb$&kV$&I%Qv55533!2F&ZY1R>AWi{5!Z`QbQv(c=0F|5e4 
zwZ2OVUv3WtG-NE&HApx_i!Q$lRxY7~?nW*NRbHRAu0=Bm(0?v}3`U~d)=Jh;PmkCY ztH$w{s^Y1E)Y_PFtj#CtRx!Aqphm<59EkfoO*9adp4z8KOJSSesBze3kdTNMRTrZtxWbF~gh zzAha%3+{6%QODRT+%FA=k>bn?xMkh=;upAHt=toa)hOf2hm`J3Bbz#l3t3V!lbb&L zv0<7?wA?Tsknk)%b@|K1;!blaCo*>j9f$b14*nwbyI#K=jNT(j5Zfy$4=iSq%Y6(y zWaN^_o|eDu0h353m~joEb5UNQS6>f%T166FgYRGM@Vha6amfnefWD6N52?(>-xY4x zP7!LY4G@(|)I%uFyEeuC^Tzb40;Z6Cg8Oe;$x@lnTk4ITJYP>pga`;?!tB?ZaceS8 z5?$%fgK6o;{)5hMRN^yW>3nn4TXRUf)=$rvFk zC#ej82H+q4$i}gk8HFi3yQyGbc9s_cK8xF<305Ju;tLmOl$$NbK&3pizEpBxHr1k9=ARv zrtiO<2m?$e3H-7pzcgLtu(|>Y$()k$VZ78XfNX`XL*cCVY?IRzPZ+|GtnGN}E;EYg z_JGL^((%=WPZsYCs%F2sP@kMsaJ5@m7+|~d5LBNF+RNi(2OdVW`+cPm zAH0I0=Xt}BA}&{SO?R4(Jy6r@Xo{2j5_6;@hNukfLw(GM1GYM4UQTy10F0>NkA~!6 zZYCT)jXSC$1FYeGyW$^Xatb~_WmyhDJi~n_(#*nndkpx?)t{)`Sg)vEC(@MD#!p1i={>)^gI}2r}S*Ikmn-YXCZCZ@2*D( z{WL&@X{zdX;p)`iO2#mlTsWM-dx==M3Cq>L=q}w}V!`GW=!5sE<`K+JsEXJg*rQaT zH8;RR(FpWny9R##J~*X9p`T%}Tg}>wR(3&>guhBAFBj#_HCYuR7>h$|t8cfC_nBSE zxd}cM^Y}&CqXljhz_G(VGOzbcZzznCNBnj6HrS4Yf4$z+YO@f!5>ej@UGQk|(D_kn zu-{=GlrKAvH|pVv>&$BYpsxL(AeIGf)tJd($E*~{DkrENukJVPZ6ZHr1qpoI=Jkv5D!f+xHb*)kU9oX(1?8;dI_KJ;50fN&(Q_mliv zs!8R>_a*^EclU0|It-3SMrLNPMT$ih%~dvq%9|fv20PRrk}Ow?p-ho+-TntSQ7{Dl1-8ufPvWI-7pa)?XtDahRRfJ-J4n zs_;HKc0LdXpu2|Tz=|VTJdo0?79C~lrbAe&Nye3iep-5u*io_H45-)$nI5kUaH^RU zsU0a94E%FQQe9x8ZJmQ@KMG$JE)%`D>_CHbq>!Sy5f{#yD-M{%KX>-J{e&OTd>WvTu;EZ^DVw z5T(~&&}S_`C#qNs0J}f> zLrDamQt{X@abZAYLb3?U`-}UebuT2z;U{%87Vwakb*OtcE-gGyK*_OjMEj&hV6iGW z$%1hK(MjcK{=}!a&xC_dvA8cJvcWFa6!h7a=#@qFkU~I#Wj#=>9d->1MXl?&PwTx8 zy|{7QiwlbdiE`!Q@NRd9V~p>#rD_r|OIiWzGaHf(jMmgZAfWu_iWmi`yM~RgQRz5J zOi-9b`p*nuFxPIyP7AbZE*UTHH(~pX`^pDnez)l(4MaM$wBRCt$`ZSFE(V8%5~vLB z;(33uitV1SSuzd1H-p@KR1_&)UM}XgfD#Q&tYF~F*ya_`eYfo&%JzsVG&~JbwG~HE zUKi$9Fu_4_v3cwR`sjT>Dud-XT8FVC0<>_wQ;8@gUY-~t3f@cL&hktdb&S*v7Pnrq zknA+Vbt*y>96%k)5EYC&#(eZ}mML`WIQ|@4XzUw=1XL>1WxBt3NMpXoQCOa-T{}HY zYQEXPai53N8XVCO$bPH0{^mUoZr0r9tZ9{A9E($IxBGDEzi@MaZ$2 zPA#W!5a^F*O62U)op+|yDBF*-Ez(B_jC@W=Y6wV$#DU8vHS!!i108T<^cLR73IKJ0 
zWBaj(+*qSq7>SoWR{p0nFX3!1jlmtS;&tah0npqYR4nDShbWN?zCHg2N5k5rE)Lsk% zbMzCF36yB5?~~O8dI<+{9)?<>W!IZYl3fociE48PY6rWVLRDOscL5Ac)he;v;e9gkdG99fKmT-BU74E_>QfV{sNip?HLBH4*9buwEM$zg2o>m#INBLmq(m+-%B7+|>97rL(~`3x zeIBf({j?b*yhrQW8;=PK62` zXK%$|=(+piBt!dk(LM}Qj^@Cr?YqnnMGwSGX6z2c_}M0mC0hiOO&nrw6~Tj44Rwr43J?%v&*m?N7`N zSpqAx!l2S`O?G!(b-8Nw?b$oFN+@&%bGx&FN}RH^uW|lLKS6SWt%>!hh+}kNP~Y$h zxq91yD?!jWp|^x3cPO&V9`HAGo|4#TDeeeH$bq58KY3BVXA155#Ct^;RPS@o_!KYB zoc;5|_rfY~u%PsihG6hSXK|2LGE1dGaU$W%Z(I+z*o>|xdpTNr(NW47^l#Rk9Dy0u zwu6fOvQr^^nKQt0Rp1&iFZPMq`NK@iZGnb?f;IO za?m`pZM?J0BX9XCCpz|6rE!4NuylxEVX)GbxcxP> zOT8*_x4D8xI?VtlMrx-We-t^?Q~P9tnrS$HeY!DyQyS%uA1B;!?x#Wi5jXojW9 zLs5Litl2$AYu|F+K#gvGztv#iATN88v}CY$G}{xPybiYV8t)xv-?DFm*Q|jTLfOuZ zO(rmcw!rUD1rU10s8pUiHi&3$=#HQ3Zifw1;HuXPi-mVU1b;E2v5|uv1qIVRH&90X zg%^uw%*-_$Pr1XxZhY8}fuB)p-`}nF3t4X|92_s^nk!>;<72*IIA%^`9KEGt4E5^Z z!aOD<3st~!#M@iyveT}i1pM-BrPX0WEtanTipURxUf-3od(}*bOT6f)UVz^$N{d2> z7xxAmi3o>Y8&su3ZomYjf}d|7?gSVJlwmppJO)_C%R>>{Ix?UfiR(9!oo;{pe!zC+ zoXtdM34iC_Ep$=4)1SBj=elsewQWB^_pZS)e#q0CVuJioNjz?5Y!ymZ>RkE34Hl-6 z>bdBfI267?au5xnc*d;cKrwZd{PCQWUAYH03@;tnuSh_5lp41lGtsP-&#rypx_ZsJ zTFH3Kyv`?eJ_%^dyl#3C*P{6SSDCljsn$khOP86iE(GKpHr{6Tu$Yx2(i+}T3eWYt zH~zU_Hpl&$E|&K*_Qs!F>NS%K>Ykg{^&WxNHtsMT>J-RD_AXxj5_>Zb2(6QXQg6tr zJWAZ;68Mv{qNC_b+oge7m?2+#F}U#4F|A-4efNlY;ul!^A5;z1n>73mf|G zKy?*wBS+<1dw2QV7P?^R@2oOWRN1^^GIqau4-#em1n?$O(U} ztS>c>-zZ5A&o|9%Bt0XAKlYB(Uws=OR6NMb(_~t9-|&%+i|$extNi0ZV*yaZ9DYU%C7RM(G9)jJNOHo0^%*Jig)`|MzOR< zk-QB@lWU9ClLBZT%&~~FP0u^;$939q2xJNH`Bx8sjpkg1gVdTqf;yP4YK zpB&cNk<=>ZKJdvzFnMQeOr8tR;anfQHWXt+C1-&gzu2a(0F2V-?l3#xD_n9n`f-S8 zK)I9Nok-@RDDnBVCSz)Q#;pxRt0j;G4QFnYz7l0rse_1l2Y^ZV>ttnDAQ`#B0CHZD zWEa4nN0$+Yo)`W?QD)uCzp6miLx}N|x%a|tBL9h<@$_8+YxFhp1W}x~k<#_+M@+aA z+9!MIUjX7l$e#X|GFcyrU(OGb(%c2!#c~2^fWTQ~T2Py8OS1=itbz@^XTG0YUra)~ z^o+l7Hu9txr;2LUf|m5Mgpm8CKEK*6|U*3<|%!4j{(ho6P%(E0x3DzK_i6rTn_>$gBENncvVP<6t;b91{c-zg11*;7BKhi{M1osBd1?B-xGRg&xJqNX7vty1OX~P>&nnp zll27!_xuI1zx7Ajx3%;~UlKPK%gTvDSnf{D9yFQ=I&N{sFr^Y&>VwPrU^P1;AQ)qH 
zVpYVLT~qq4-l4=eJ8ua^vKnM27-is63u`0k(5~TZxvI z@7;wW^(ZLnT*xq9a@3AwxdFjZ9iuq%cLRLLtm_oFVliBd^bqcTSKXy^MG}Ma`JP)H zm9He2tLAJNf~@`|ZjsliR-_uA`al-fqh>^g{q?7^LIWWX)#8?xo^SI3h2?dy zyyh%BfztjJSygR<(1F2fL;E%_^X6=vLO$ept0d=EK`Vpo^8iX06Ah%LSe!n8g3IUx z#Voz^^te)+3CZ&Yg9Cq=W66was;zzxQnvKYY3N1T2%{H`Q)#RfPfBKINf&zy{33JU zVCp1S3$L8LbLJjvGkw*y-;+!Zc+kptpQuaGJ1PE?PT1@^{$b~UKW1}Ple}xWL%z)W8ba1 z!%n?BYw1MxQ)OQFB~*hTPxLxlnReC$(VK8>BYTp9ar@hi*(bpV^Hhf{0*hsz3nw7Ml47dS`21qaMU#(3G%C; zb|_|qTT!L1^`Vzs?C(Ez&&|9gl}!2_Lj&8VE(ir=nJ7MG3M?1=@Rq|5{zmu=@- zV|uD~y2mzn?9xRMoVV<0@P&FnlQp5a>W39+JYEb(y@2F%qI=)ytY2^$$E!4`j8QQk zTOj#JIk`AcKcc?|$cU8D@y;4+ZEO=^2(nEdE{XrH8iu)TD458r-y-sQWkR`yal0NV z3emtT`Y9zp@#rM+)di8b8{>bth%Ya)Nx}PIA16y=`v?AtIKWDl+1D3U~fGxq@id+CIWW8s+G1PnVc_{ zlv~4Dm25qLBU`3#KTR5>k+~nHRFpCCM?M$Dr}}T_V;ux3Cn6u*XVu~XwU!jV8hL8B z0EW+b;`JK>hsAczay~LDa#WerTB;^fMAQra&%UJ3^*Z%Cg<>p3$$b5;SiEg~F#YyY}2YP|JX2rFdNeAX&u zvQROSp8;8WFdDot=C_Pu;?w1x1a%T|NXSLNw$lXjGKoZ{%wI*6oC*I1k29v85)A^= z_%!QiK;YMP_Z0SBM7bUjg^27=ar=n_cQf3@pi_2(r^c%$+Q5wpy59sfG#9Ss!jtm^ zRj>C>F6Ukh!Ff}i9s4#|$;j(IRx)Q+mK}pM!0Cva+7E*fcktH07^z*7%%xK5%>B7mTNv zymKg94BC(Sn7F^Sf1ABid*zW15J9xMFlg9FLGO|O=ph~SLKQwFm1O91sD@CD^nieA zGxMWv)(cQF$C8(#y=dK>nBQX0Yb!T7_V))gD!)*KH*AULRKzRVuUG_$6qS%~YhTjK zD-=snw7qj}67xV%j&ie#!tTtm3oGcJc-KLbT~%8F`>Or)=MeyJbPsP_0aU&@d4sXa zYE5W-0<|C?-UN=06bZYU;X4%E37(a8Cl$dcl!DP@)v6{v2xX0~ZF=*b*_AybFTElQ zgFbZ(ek$n@Ku0BI^rOM|6YUS~O3oh0)Lqi3mt0XL2RNOP%;gpSv`Yl}OsP_TW3CA& zNBRIN(sUrW&3X%DMYjez?5iC@d)yN|l0%BzuTOjMimh~n9}jPX09}+{^JyupeL)kc zvj`K0$v7uzy%m13C2QVcGIXcwSb1`*V`oNgLFGeO&j;ctK;t0DhJQg!wIKqk$|G&R z(Zwv^H`>8PZH=w(E^IjkcN*=LI^~sRd$E*fUE5tDR~(u@(1G|_V63y#NHxTa!?aQD z73ilY|8jr1L}MhlWa_(3bcMQ}d+-8WQnduyt^Y(Xhoon=fXyoA(0U?Y#Uvtk(#s2Q zDFxB@I~@xiDEVFhZR*wCE*KwHJo1!4;}XJBf1$Smc+ckq{y>vE-=mYp2mi%WqMr=7 z%{T($>9oGTmCuqo2Z8=g7O+)DI5;9+0Fkb2snOoj&3-o=Fr~_!UU3C@nyMoX zshm8)S6Iqv*`K3jIi%5@BhYJI`{pZ(A9}pJ%i-B$4qaZQ1!?TKG%nNohI=;&hWtTw zk#m9w6;8b7<{-Xjxpk1e+Ski-w0I(h8rT-3k!0i)$pS-!JJeVe92E<*;iP_Wcg^^5 
zuV@zcq^;=0movc9jc)#`%wdy^g*gHbGc*z)2cz`IDS+3^J~nfNo-G3pvk|6T>1#&+ zBUoAeEAFFHVPk{3+Cu4o>x|%Ksn0TzeJ#RYqfW_`6m;uZ|GR;d`N*rb$!1ADX36T@WOi+m){9y4N?rCR-Hr;@E|s1o z{!eVk-lm7jf{~S^OMd5J>&oMA{rkwt3FB=kVwN$>sO)DW$Nl;7@$w%7oy%WS7qZ_5 zv&IjLBdSkmf)3xv{NsIrlPRSjdH=LyYGc43Q`W?5GSb59q+u%$oMS72#9}{#JvqKW z>jC_Kh05@8sbFeD%_98r7-QvRK!&Vb4OGSeSMngYe84y65{Vn6!{oWuNvhhqr*2bU zef=`wYP#R$N5k-xEa2=mL^u>T29`+t`4I9tD-orSAQK&?6%t8hSonFKda;@T4%N9d z6kp>slG$LYi@tQpvbr%=V_irkn>F@}$yj8(1q~rL6dsCr5__pNpf08bg!iK06j4K9 zHYXXr&mh`dU#+m|D=N1(=P_y;vsGp z03b`zKIETuz}+*WEIrqk9P{bG%dshwPsW76Y|8@m0&%DoV=TwruQZ(wepb?I;H9^$ zW1f0aX70Q=IG7w66Md&vJ}Eb<@u@h43{>K;MgaUPKWxPVgikfum&0Oh`x(ohF zn&qOsDt-M_^}$@h9<> z@62xAmVrcb{7B>1MjmI1$Op$9wOKmF9-L8O6i{)R#X@#R+?=T6xoG0M@nj;;@D!Vp zrKTV%>setpdqs{vmVG(Nn8tiu*H&o06_cr@{XejR?_=dVN9pp5xO;bgGyxY;_#YHX z`cvTAOjj>gXx8{C3G!%bz(r}(jT2bmnQ_OG&p5(Bnk3`X#qV*Y#(P3_!*!GmM|LAR zM=rdb7?7V9sBJh6>nWYZ{T_Io{`}4Elp4Lr)C2QxRX#Mw2t#RbP{C4Ifroibkw9Bx z#nddb@KC{6X4AiKay@BUjpYejbJ4HT=)}H&W2RuV)``66L_uD|;`C`UK7+)(#{N!!DjK z&Do0%^9k&}{&j`y0=b(@ijAHDO|B#^$uiB|pRB4c> zYYc3}aT?CF`1@CCZ}!uF&Qg&TGtnt0`&5xpUjVve_Grw^Dnc?{XJTXFLAUnPlS%r0 z+UZz)(QD}f32A<0=)JdoC-jC=P2!?Ar!IbMtU))Gn9Lrq&#>bn$5nz4FmWH8yrJ(B zRuB_MS(pQUmSDVXl&1)BM&<7Hh0u^8*IfE>ymlmRYu(m=S`E7O)Il%!ao|yR1ajUT zbK%zgWuU(0oqX_=IgPt^&WkP~4K~h&%c(2r($*ZvOm5BYTNvNgm5Og?rEl3LCIH6L zF3jkA*k=Dhdda84BAH3W=$eoSa}-sdVyMglf;|#6WoLw9HL0k<&|O-OF@q{(bSIyK zZ@G*O{(i+vWGaeCwyYg=jG1^xy8SUZ_GfwUnd{TCw)Y^Pitr-a*`-d99=t!LCr~Qo z^4KZ1_xV15=T$gei@vLY>Ut(;N8D)Fg-N+^1_6*A{|P|UgsmAL>407aN@G+G+|&iI z0zO!24><$lnit4&k|mp2-{2Ee?fIVly%Sd;6bb}+?SAG9JEeAx=n&R-HfcF3m)uEq z9rx_FuUA=mNg&5j(59Ab{p8iM?kDzL}p?i9=g+*Q(I3)|R)BE&O zqGwcBcf+n+E^6kC$RMtCGF$~jj8bjXYI;d7QE9{L5c6D`4t*nIHK`7hi5-1Ep zUzaRVny$iEMWJUhMH>+rY*nrPs#~2-3~cq8t_c0NSD;tZSRt`Yb^V zhOF6U*aMn@&R-vHGszzZr*kmeN*naly&$9)k(nI`Qwf7}Uuk@1Z=U*BQ(b?VW=+h1 zHmGl`Fs`SOeu*tU%3@1R$zFZ?f>dE#(?qS;XTE}T-{)X3q(Yr*pV;J*NVlcFGpcKG zd^}~nN|GG!pM4aMs7Ci(LlxRw`5)^(MRfVq*5$FDq)xBT<})wd^2s895}y=T0jN9O 
zLqV^~!Pt^5%F1gPrtDA-AH^8-3S*0&`@aM$@rc>1m^`I4F?o zdq5>bOV2+Pangn{)*Lj4*kcTRZvDHpLJrB5214=(A!dpIq}xzz7mG=CZd3D~pw~)b zxRr%=GYm*wTu0$-^PQR-5oI#S@N#kRo=7gxd-mvS)5q+BD1q2usz;gpXMyvE<)nXi2d9{|P-Xr{F_ z`}qq>AU{CwW(Qu4xk+qBsNj;^3MYG2g^c=5Ya}YYr3)E3L#d?Wz14QCS8XMv& zY#By=cs_6GmR=B29$lW8qP&5(ppx_{NlzD8CMm@R`(%%tVFw(F@N?zTXxGGb#>5mw~mdLS24L2Ft#}YvAg=j(SpwXCuYKDE#(@!uUp%@4##f%GvI8 z@j}S#k0H&2l*k8^s&|gYH^>jlQa+&*C;Xo}O*2tphKmy(ChZLE=V+*8T4)PQuoO@%rmbknHqub;(~*7BEw=<+jMYQpXVHB&>5Uc2>)IRbYAmVj{FY{m zh7+=7SetC7@gk~$Bf=KVgDGJR$4k^i^=6brdi6ss#T7CuC;ZDzAhL{-+u~Dyv|b=L zbwY0S+FnSS=H^sO<$WZ_-uTkkF19uS${us(DFO8r80D7X9UkV^D$+It9*e|%Ty^v4 zEao#L6&aT4>bRHz3%Z_laWykzYHL30>@4LwH(^42s!;0W2^Mf=_(zd6P;7cXV%-v; zCC0%nv9GWT-K`r|UUo-uBD1Ks#?grHt?T+K<{D!s7%K~3WB?&Hm&fDy6_pY>J$ff1 z+cfZ}rI|qC;Dt*lH(%6r(^brkH|8PE+2p5ZBBWr4PNEOd^A$>eRfua>7dmG-oF72w zFu&3L#KnCUOlb?^fJeFCqm4Ut4}CgXg&)Om9IeQ`^YOz}nkQ$pm2KaLbTgA2p!HtWCyRJi)7ityba#WXNSGc7ohDa7~be-+U(&{=TN(D~1yJ$grO# z$H;POf=@n{O!Qw|dzX-XQW1)%7;y$F=_hgPWEi=_uFwS3piMK6YfX2$LOfh)xHfH@ zuLYDjx6ZC}tfT5wk71bk;@2$%8C?T5dy!`jtSTccqEw;uEYWOcv6b%nQSstmv#T~X zV?UDWG@blld1<3-ciaS@!K)(J>@sDj%PHdPG)qH@%lAkrqcKHD!ENj4m|y0pxWGT2 zfQ6cYH7h?{TD@E1J~Bops>Gc90=EVs&SPABKf*>upBs=jtZB;z-dkYQC~zzTihY2A+kwtQ*OPQ0e5r zjeK7=wX|so+P_MYRC114PK=kN*d+5e>z_qSIOnf@r5n>{I{tjIe2T+*s9vNtrh(|2 z%$=)iv6%`hC9D(Tw}9`C7_NZmyNwbut|pY?c6@^4LJmooX|f+g5NuJRw*B)iO`R;; zIdab7dKgw(d>0j<@<~2(r~}!?^DaTFZlU^tk+WW=wFecn_(me(6+DU?x`|5)?mMaM z3{o&Lq)Ap|K{s#d$Cw5@=nZK|U9D2A0-z8sfiw$jdGsMlNNO7#{Z^)fzQ~VrbOM)- z&I2brS+fWX*3jus&Ri{0P@NdOEHETaL7OKmrVO&8_1(iCnt znxL|K*|AgX{d!*#{=Unj+Ymat0+2(4xo3C)r^{%vkXV3zGa5k`mn%|5!6` z!kIK^3c53zMqON87&pE@o$;eDn#ry5`76+4$>)+c@oX2l`ho_+0`aqz-l%c8$wwW7Z=N9Y<}+Tz;Hd8 ztFL3trZ4tmNOhGH<%VR8Hsqj$$pM}89DkEi3`R}xqL2$@qdQP!caoquBxD&@o*6}} z+j>*#wICQD%OCCS3AA64W2D33_Jcc`aN$VdEK_+h<13NIwPfsed(4wqnoF=N92hcd z@6swrncV$+0LJhM8h!OU(R1KzzR-kn)GB_$9U(0bLq%p1a&rbzT+ec@bs7JY^75JQ z*n4nGD3$D;4?xEQ+n7oBlDg=BHa4Eky*ec6Y(NH}ZJZ^jpRo-~T04Ara#%6k(O}Yb zZT=)*sT6V 
zPZ)Sx4?YUs6{xm|qBkw0aF}n)n(rcY{7ZTIdU;@8H{sI`=1ULx;FU^A6?aF(*v%)C z-Ud#mdvpDzma>{X!`-eEbh3O@X-2vE?9k?C$sKeXRk2_@8lF;D`!00(tU^`*;2L@k zpdQ~1nLfdgIj)PWUDxqos=76p)614=lfOs=rVk0?j1yUx2T?<>uI-JbJod`e@GLBy1#&G)-+X5r7qSHL7Z&I0%K<8#qbmve5kQhTNBz1MyYEjdx@l_G_y@xzn7V`rJ`b(nv)UU9eieOEnh zbKDe4iJ<;Idc$m=s8dYH>7Epg7LY@ z>~fugiOs;EIN3V4bL{_N?X8093WGImBuLN@+}+*XgS)%4VWYu48+Q%v?(PJ4-8jMB z-QEA3GgCD+Q~%6d%(+aDDI+4t1j*mtD@!e&`8QjM?aoh^ssPxH32f{A$h&Ouk~2WISy0Tvi|X z6+BuwA=&?&{ItSy>_qjG>&WlCam=xQKN8D%QuTbW()OC=eoqhQ@vvPzsVO%ae+N6u zi67a?r4EpcZ0ICEk1E81DGg`vSe& z7)DOMmHtXtsIM2$N4g5V{;VTU3*%(bGA|(S@ti)mH)!w>L=tt`pO{lHLa&>~9?Wag z8`FJ+QKMBFP=nPIB_3#3v7G~QKqII(@+xnx;+pQ!c>dWcpa}hrhcZ;}=rRx9T4n9s zt8tuW6Fb;8{9b-j*G4CfU?Eh1&^b7Dz(=t!5;teS{IV9et1popbi;fR9I{PKZ$LGb z%SnfvwDCJXv}B3PVmuqRd+cua-Sxo7I?PhtT}k|j(-U&zfSUj=h-a%lLZEYU5Auo| zvM66+Oiw^^Zmk^dFlJBrJBj5-0@|bs$)ZFhe+_Sa(%Qj!$gPAeiG8D(hHVI*$Etef zCa=00G4WRpedNJc_v#UG7V@34V(d)7^NhOugtvE@dZ2D9X*Vj&9FROpxSWnEI+1Sa z(>OG-kM!tJE{&lL2Xz%be$~~)S@^wSD3cqf*hw&&k(p185`Y*Tgs-JUz{GU%TS_zS zt!uzR^?4kqH~G`-hHVa4y5Y91V+*7}^dHJy}XO)~CgHF{FF{k0~Pl-|{8cio~>AzjE8(|n@B3NvY zL~p_%1t*$0;hs*)Ypf}6($diRaBp1G&82NRGYgVOx+2i-%#Opkw5)&#n)7yB%uIA+;YIy0cyhiG3gVVj4pZ~J_ z`PD%*vmwiZ-pp)=L)LMRgtr~8xkGqBIOO})+MiNZsXEvMSdQ1Zh*lZgv3_1zqX^-d z@<4?#5e#On7c&nY@gK^7FUWZ2B(l>4TiV@AKfo9)og)F;nr7l_6>ajrQ&=rY=*s8= zDB%y{#v0(hk7dq;5IBlz*r&!g&_iCnt8}2bmzdi`^sFY7 zaBmvqXkXlEEj&IfkGjgPMn=_hhqKuK`V*vY_H)V_UbA<&DDg>M*RQ$=+m<9Au;M93 z&D-DczeMvc;yRR@m<{^3dFFAT_!Vhq2Prff=t8wgl6Vxjo?m&r;c@Ursv68xb~S7( zoFV@8T6$0vIJyaeBxFBF!~}N=)u|{Yh|tI5FY;AnF~QgBDun^JjP8!Xw}GMw3HbI}z8BLzP5bXuQ@&=IsCt*YK{8*N%5Umg|VXn86FVU{ydyUV3>^9z9o_VcJMlGG~#$3WVZw>e(L#QqJFUmO~_$pjuCM zk@WxKbtG1Q0V=GiB?(uS-&h7*AOT+e<5pJ+$&TY!ftYU>L1M3FNlE}{(%K6C;I9-% zVJXeuT*j+z#34BPA%>@P9)ehc=x4I1%h!gDC*AO(_i^9{n;HWN>3<=>At#}8KBrD( zI?q?Zec?3t*V=zD!ckPH2@T~nHNhYt?I^zepyjCCdI)gU(*9>2z5porgd`SjxmH*!0n2 zG4(AcB$E;+XY%X^AyU0JVI2nB4WYO{WnORh;&zLi@C&|v!kY=QZ9Fcc;S-fhuOnX1 zxwYwJFH3+70|B(ySY~SsdKn;rr%)V^@!M!E#`o2$S;y)e<;pFWjAkC=of)_AlI_UR 
z#MY<77B*M3)Srv0?6kZbyQzs>!@CAHb+x)Oc)V5ybk%L3Ne zsk%M>)aK($R)L5}?pzprLI*t|7_=gr)d8raGk3<|#bxDn6BW^ZJUbdhS&HQ#Mc|ETLNLv0zVAC|_!(onq4*cFO$| z?Hem^^z}7N#*5M_U{{`yEayhzR^a^(FB@dEsA_M;D$k>DDE3@Tm=^tPl=L%?xnOnd zo-ifD6kkth6Sl#HZ|2}o|BNf3Z_ddmmhZv^GrdW|(1yj67YP=0yS!HfBoBZq)v!MWk$W~yYEB2EHe!vSdAsBr3ZO9R`eZOvqawmvuI!Rh1;SZu-mY2)9`#0 zh~`oF*#Zn{`8@ydpiobSz}vAAkccODF3=fX{AN}klgN%S5F}Wu#B&NIqGB%Pd0IxU zqIld!_^fl_QtN!QB*3N}ykBxkS}w`jSwInnm;-aCeWgIWbr9ObQzxIM`^`XgMn(ZG zVX(Kh!5xZMaPO>{=nAZ`B42shyyLXAiHcIBnaX$s+@WZnsn-M+27&vu`^$V+8>8Ku28E5n=PJ|`RYI_gm}pb7hX^ZeCG6=+3iW49#eE?- zsSai&f-LnHE)T*`8Qj@Cu_$7U+{hdON?bs~$9=x>oKUgH-#-C2S&itytfbZ)Tsy{e zdIeu>jkFUH(Wi-Wvv9`Yf@`<^gxYP3$+$%(l>PZ%ug1@b)*6M2-WY=0MKl{t@JRLE zu(=X38^N?bSG}cmy602meSd&%Ig8c zp@G&WBEQ#9s2v)m)dKf7a*_6_CEejCn$XuYvc{SqA*NkWftATQX1~cHnS_@90HiWJx8eZ>5PCDGy!#SJH$^9HCDIMCjHAca&J8Qmi@?x?r1GZ}CQHc9h@{4%>7OngT>?F#O5jdMamcj{7bpgZ7AxCLA4Ne5t}@qFu1tRMtT5i=?%5UGihkR zTLBdp-33YElag+0Q}`L_uR}h=g}@oFEb9=i*&#BO0y0f$;nSv>+gS>f$TIU*MD6-51uEnf z;Zu3=a|4(1o>cn|j)KslY@P6G5n(}1O5)c7RsN9+5kmp9@K~g0R=EBR#nnly-zWQU zJ?`*US}IQ0`IYG0fdV-YtL!Vy%x38M3>$@Ef}-i%4r{(u*2ogex{;gPPuGepo!+=y zF*3dJqk6>xIBUvF6et%+YCc2zT5otb7QZU$j(IAIo4h?S|*{kw2?`yuInW-Ycqn2o*$!eI`8gR-Xqt%gCDy;dr# z($SYDBcWeKx#=b$UyOr1b^Ki_1I%aJrJmteWMaXO#=F(*rM(y5m{D%o$9e_NR9P-z zomUT!w*~ol8<)w7Ca3>1(gwS^pxo2MXwP^OWR|Fg7wm z;?=)zBipN&XXaNKXlT1EqgaHMWT-Pt7i$Tn?0NKLug;fIg?f$WG+;c}+Vy`GY$03O zF3Ey~gIa-#8N*0GRRN^YVb8b)cWKJdf1fK$2HA!!roP58y0P!cjwjwp_t>rVWb|2H zH|RlTxhLZ%%X1E9Y4D=_a^$lUnDyhvXyJEC$m6@!0Dj?6VDB_$X7W{01JF($XIKCl z8I^1cnr4tLtO;ei*@`R9W)yzdHkFUlG2ZC^5mfQ&J^)a+-Hc%keJYpu*(G)t zqjy?m+PXU}_9#_QG%a`TIIUDa9sQMV47(xknvCls#TEC%hJI%q%QJD}2_D~_khlT2 zmfzbQg3n$&v=Xw5sumZGQhX$GLe|QU;TAK*1&AKEkIaDlu;@`Ej@UP|3y);FdgHn(RY8TH9RJt&EyCwor!6OctAtd;I zAz9ZbnjE3qDdTD-B)ZMvL}(oXJ}Dy0eq(=A%({DUkRtloB3*>O7>TaIfal*@a^=jR z@`a(?(+Z9O=`s4vj6&AyBKi49%@N+FXpGl{Tn2Vr(9!Se@ zZh8F;P0(76fl!eF2D~H5YV_9KTQ3f5VPE-b=b1LEKd(QtyvLDW6WI~N>YS{)@?Dzi zSiM}^hHWkL#wf^!2U1G0Xk^t|79@G9r9y&iz93*e8No)!eJC6VSk{`$QhAwIu-^T= 
z>Gh*Z9TW`=05rm>7|=|ZM+b8;8_%~(gbR~2~kr3tTLL@!W({s#G% zZ5sT|PKcEWnI=fVH^4Vj5!y@nFEUx{SIu4BI7t*EeosVW$c*)Px%miiQjP=T77O~l z=Zq;Alj2Ce-#E~*XPGXzZB!vIh!DOaS7489NtTY-=m)@|JqUS4qy@`J|3YynQw0O_ zSSDo$Q?;J60vHlMlh6|$YOO;AJm;Yh$I(Im$*IiJvb*H z0-Z>r&hwK_?GbvyXGlmiZoJ{G%C1Kf@e z9nt<`o4}mu5#{iEv?1b#AE8%Z`SVnv1aEim{RJFXHG4Q*7S=D4uek%ccpE~_p6^{#{waA??VwH&{*?C8I_6}{%HxKtfF&Hc>q%nE9dZq4}JYG1{Ke`is z`hh~J6^n)rI`GjF%@DhG3|ow1AgN1iX@0iU!X!_eLf4ao}LQpFn9 zKDw@$&&yL#eF-{UO=*=7^%kXzVco0UBKx$4aeaHWcx{2{kmo0t=Woc;BLljah?nuae)Qh`*DfuSmSSd+Gec^_&QN0_L7xX>JsL%CJZ*C%?>rQuJoT{7HN8qD; z-(unKPU9I51B50AxoAEUnG&A)`@r-99<9_y0+TK}kOqCE+#aZHK4bh&mYXV%Kn#WE z6qzo*UZ$#zOltZ8tM|AZCvVi!s^;l+?7YlD#C&ugwg4t)(&~i74to!#2KhEpA+@Kb zSy%ZuM0=Jt=S)ngTLLQquWOwYwcygC>Er>|y5aayW7Gj*s6=4jlISn8bG8diL(iOgXj#n&^=T_WnRm8HrlC2S#Yk0j{HIumCU2yTV*Nv&_%Y z-$jb9L?{nOR@pI+@p6sV*=xqHE!MOf5T&M9(wVF8E-&Oli=%ywxghNu8yrA zt5Z{|qp?z#Ijg&rMtE%JrapEV?^O8mG^JipD0xfwyawh87EK<{i}KGT^VF3klx;l; z>%UZsaxWC4zTwe$ZvPkM%^1-qI`} z5Wn^%Wy@{7ILELtg_Jn=C%5a21aHZhpi8Jg+xktyV1GHN))y{Kw_TC!>Ni=m9gYUY zj=RcreZ&`wSq#BNvbwK%547A)Nc*DI!Gr-(zQ?b)jY@g5_}L{DC`z;ofVvR-9tXnRMG*~ zW<5T3N{cIF1D7r%AJD|cGh_x;&*rUD4STNF86<5xF8hYqt@)ACxUXequJShD2>AVO zM3FpzYZtJPGpJXf-MAYG^RgWNP+8PzjLtD_=_@M`H~?M2vyb9$^^27RiOQD5O5WsM z1z{k+REZeCNlTmnp+={s%^Xt>xmlXX8^v5i$g#I~FiRSBCz2{}VV1B*Uer?8=#pci zHuiS#Cur70nddLh&W`1Qjc zW#Gzp6unTpKC>pkM8ttSU|G=2d&ID@$GC~kS&XL3JUO~~BKq+s6cY&2R zqU!@kK2LyFgf&AJEVRf9rS6^bCFUE!m4iP5T4yHfFvruit{|`B?TuYi9*e%H#0MXy zbJ1K{ic#+#H!>F!b1{3QEhZqin7zEH!~;zp(>A0MCYOG$lsX3&Fdl%=`1Zs^_|W3m zK}AyfH!ibC7n<}u@H@5*SRv{CN?Y0Bd)H*!9E|nx%MS%!-8S%=qkJV-Sl?OA1i4&ja9ujWE;n%7U{ zZT(i%BP9$2cL83|g2f|dnqe^QTW=sUM&0O=_`o+2w{#$aXt70Gk!<8B~=kU73J<& zGCqG>V)k9AS1OsvVggWBD{@JS znar~Z^=8_U8^kimR2ueEtOo^}1ckXGWZEHx=_gbYC9|H#o4>%3+klQWE?imBs_L?- z(v#fl~PGF%lfu zsD`jpQ&!cY!}(>=eTgolQiFYbT9Yj?_9i&?IB}jWx7KH-66}QA{kfKwyM2}-A?)nh z1a!@OGWgVeJ5x8#O^BYXr2X5N8n_7;DJ|SzdixOU769!5-l7(x-C}&4{bgKjbf}B8 
zwilJE+XwCBTZ9+zIeSiO+XbgkjC1nW?Hb2@0wSCJ{Xvf_o&FfHW9WP(n)(T5)Q(q|}p7c@oV8O(p(0_{$;&=`-bc8tW zrSz<0$+!OF%RgN!+WH<%pe3jjF|IId510X}1H zD+v-RT)t5ICabINg3d3Ls79&J>4MPE^QE%^eC;A2~uh(Os{fw&Rn z_-|a>9yTQT5sen|QTdwF{0~ejJ;jV6MX#xS_E)wkss<0L5!2sUIQto)?{p^WEdU)zkO z7af_wpyaF(bNH#9Z`?_$&h4Xpt%TK3S2kD$RB{Wb!Pq!G$~W z!(bED6tpcEAru(cZfv1a*HMxM_uJ3CL(rwxsEp3bhw?TORjd>Tx?j0L>-gX@-9?>X z7ee$RIyjS(U8^rJq(l-0Rb+EcHgs%}Ds({@33jEaMMBE8lx$o6oN+U>RAanRUhsSM z%HFrc@R5m~8_#3lS-ipH-l`3Bk#*W0fCxfL;oSr)Z^bU7*OO#wTY=`#ktnD&2?(5_ z5-;4QDb(5D!VUzK_>bOO-mg>SPaw5Jcg?F1+33b!PyQ{ zPwVaQZ^j)RmV!9Nb5sLP98vvKUvL#Tg5U}Q?ei0_;Q;^sdq0)Y`r6+`fdhUar37$B ztSR-mU7DIgjp`NAEyjU8cu!+QFJqPMub2qgtBF9Nk-mL$9R%y0E3UQ$+)u^5j^ix4 zAGL@+yJxhkV0YH{JrTfudLd3(5RV$=(&Ucb zACCe|MZ^i1RI8Q46RemRD56BPve={z52s)w2zd(Jn8#u!m)<_qrq^q9j1|O;jKV5z zlf<_tu5#B|t5_e|0~<+aU;jmV_4c=LQ5$OFo{>tYYbaI=F_*6W65qVkUg;H6Do{gH z{a`McicnFKG#&0m;xfe;u^Qh@d4EH}KB9~@ty7Z?sztboIv+pHSNL6ngPb30R6i4< z^4TrR0AdJYqTv$6Z&vMx+B*SEYqb$+^A1*DRlTC99#YgZeu#o}>E8Xf9H4JSwSGTM zc|is6*T{Xy->FwA)+w|T6G)@oA6NO{BQqL;D3{(zew~u#31w_j?Q`7KOC3`SXK9x` zv8o&u^wVAm){)sSf{V~*umX2_g%95lqyzhVRSKk=G>Dr9izxDxg$tUf#BP4^cv3wi z^^+VVeiQ0_h_rKgNfrVv7zT${_3@^lEVnDUl+D13^{tXKf6PGIgwtGD8iVr)+kI}b&2le z71WtP%fY#^=>cy_8&5c%8@}aA{+7}$+xe4&r{2(lJL?iRi7g(x<4D}JzmZk$3}*+e z6Po{>kkE<8ZuSwWl`GLMR8_bm;v-Fd)3W*ywK1}j?>#8_hN7{-8U&IcFih%qP-I1j zkp;zxXBcedSsUBW)=Q~*X&Krlv;LS(BiUZbAb8P^`r&SjZzl^;du98@9|`Fd^ku4_ zk`WP}iNy@$M*n=8yZ=pXs4v{jX*TH5&`CazE(6Gu($W0<58!wG%t~&a;&kN3<}A5W zzEo16*cf%62=5xte16`%vpje27<-ZzMwzEI;g8W$$`GS(rpFgDvLD1P|6DKvfPpnt z(Ra)82-%@}a`j^lUnRzG{yc|I94MMYR*5jU+oX=zzF4ccU*Xx^FCisdFXAvpoiVql zA#T~k7EF<+1wx(+@yp>#<&bKHNHt&xl`cRpHJouSeWm&9v|2!DMO{hy^7r`HdnL{|l4_MyRR zj-Bw!Q$VMq=)Rcx-~zgWnc)-Hna>s1socojz=;SB^mN1w-h6!BHm}`^vZlvis?PC# zrS+Ush$$U<9UXsedD&eEC}p2_-*otlfDWoSK0sk8ESGhWgPD8Zl_PlQn|Gqz?W77I zMe+K^$poAc)eY11oIIK-?Ydzes3WBR7=c@yV<**1w^|!gI?DT_BsluUVcril|$cHp~hB#I42qzN4HqI#KqG&p45f75Fo` zf_j=l2#uZlO=jfSHi;gb-f~MH#S?<*}l3vuLAzihr(%XuRmtV 
zDVUc%TN|RCAV2#ViOxTL?`K##Og)s)wF$>XpId<9toq$%e%*>pGa2JRRMx>?=}pl$ zC|R>FKctpX2|d1Iz^Fd;@|mzzW@3x@A&y1wsU72=7UXJPI?~{%_ZO%E&||_C8}~IV zXjAsHXv3s8#`O}>PLa8rwv3#lR+4kRKiUT_xarnohM77d%=`e#Y+dCRC4Xk7=$z+# zWnNNxjO1E*GI8&}h*qq1G!QN_R_fy4M_5OO~RCuejJD$Y=lcQ)~s#032Z=V?MAwPSjQ~{Cd zV2MDLW@f>r!f8`^KpM`*$Dh>0WFsCulU)AOnc$=`(wIU4-8V)>$V8OdVKORO=ufVB znay#*j%NMUArJQ?W#(+LQ^>cljaaSg_52wqc3~${7hIuCQ-8rTq-9@vB7UW;x;(UaL)G&-6O#&!H{Ybdt#Msy|~P zSj-;apkJ16<@O~CC0KQHEYAcQtiTYN?)F=W2~5|LmYJKwJ%t3ZAX1exXM=-3P{{Vj zwH7vvHe$ai*!6&&E%I%t1K`DkpDwqroWc~JaSx}deWlq@vGmFIU6)%OsLkAjwGl;6jJT zf@-)=&5vgNjVitNLe>mxuL1f3+dj+H&n%5g{KX^pRh#>@~;#4Q}N>PvCTveXO%EpR5CHc$OKAAa^rHt17xL!^^ERH|ZlE$q7`E1EtxkY#- z)}to-ia7j?1+jB8i|Hwlxe)+$DAi$+1t{v5P>937cfTA=<^B`A;=7(%8LHW2B}p z5i_GrxHWD^fk+Wjp;o0zC>zeBBkPCB!glfBv8jI^rcezu9DK@K-wm71MxEph#rQL# z|GtnTgh+5ma3@n9{JW(ihb$A@%M?r|2>@bfu#m0mAdzMNC$r#(xfkv0+24ARYuC1d zGk{8dKQUkdUmDjsNOI8gI)^iYd^%nV1vN~}2%`51y?=^)Oi;PLQsJ>-)kO!q_+36~ zY6)o`M=mlWr2b4h+AGZoHLDHV$68sCaO0IBNtHwW5~&}uR`q4p9M1+MccidjfJDD| zbnVPId#7UWs`e3y6^7}JyMHuDS1mJCs(B`#(9l*TNy=Zp(aUe#NVcMRsu-Gtcy_g8 z?vbN0h?rdH@xhX{(5YwwXu6lFbye34((0%89QB%GemQfx#0Vgc@R>E8QKcB0&3zJ_ zizHrn+tSg02}P+Y`D!dPkWrZk_W`3}ycm8j(gnI@Rz8qfYSx(EEq zchG3&(VOe~-1M8nzh>o9Q>I)gsw5DFOR54-kGYZS(CqgXMVL}p#(0$@(g|?s%EB1R z-}=I!%mINyqJL!kPs2mr#44gF7a|leOsXxXN|SQZoVfEwT2VR{*J12f>eGbYZ*yTE zam=nsq+dC8xs{pzEv%y1@4P(o;Z?uE&Vs+zeM$>Qkr* zD)UXX-MnCp4D0OAsw)eOOfwr&AuKO9!bRMPbhh(I*ML$MfpemCYESU_zlwBpFBfTR zcQ3U4|H^qFfy8+G^9^b$D?~Kdj-8xMmwGySF(caA-Gp1^w**=e1 ziwR9CSP2j@tQFc>D{8?_1>1UHvI`Rf4&@A#(QdlRu*y!#ni8k(UPjZ5_qx>Oeo1YP zk9n*5QwG_Yr&fpZp}*jkhD3iCnm)FVcp`$$+o>T8q6ez+feZHoXOd zn?-s(Vovo=`1gk}1uHdGVjg%!RX^Mw{xY+Qo?X`F4zv)D=oOz7TZzb}&S1?r8udhA z`|IiYlS(HR(dfq7#l$0X|L;&9`D$X=)f8rB@H<7n#J6gzDn-GPGPoVyWH%$vvK@Vd z0`HXmpNsp?JxPN+P{QF=c|xhsdD;a#%`lZ)mwbCd`mX8tnSMK9N+c00Pb|AcC$|jj zbXa6&?Q|^vhabbMrtC!bXc?yI7(%kcZA5Vfsxd^iX1$Ln`1pEXAYQ-g2EoNXeLtz% zLKi;@v0c>gqj$Bw3R3{8X&$bt6{KNuVWF{u;hHRxfMJgzOi1cuv75ySK0zfFhHl~| 
z1oJ5OfS-0~)NLO3^p2HcY>xX#K>X_cf_jPrbKQ5x3+oI3fSAFAEy|dd3|ygWio^cZqZ#r7{-< z?bu25X86fkZ?Y+oA*`UBXp@?alPl!u<956VfibQb$|;xd4=&-y(N!Zo(oyXyuU_)% zzgyiCjM#Qn#ZA*UPVlBTzsj9!E@-WqOEX6?*UF@Po52cer3f5$MK;wJ`DBsNut{vn zMgunL*R~Qwhg>OY17IXHZP3ALTv98SD<9{2Ik>uy3Zsf~H* zcevT=h(Ubl-K<6+mw|vB>j~_gvq+icqg4MOS-FGR-dB-urbDP(XQjaT9jTQkXkEh6 zv!YYIy_RVSS`JnWkAjI~<^b^(s|iJ9rthx3*Lx$X!eel`Wq6-+eJxEzJKOb^dYYY% z?H2%I@y<*Qm8T@CG^V8kC!+*@7Hma(cIxfpYHpfkwCqh;yycVV$%{OtZ_2#qm#dDl z_s2}kLR0ZZp*gb}O>Ri|Iw=>kV!#Vj?#1#3sRGK(%^N&=$7m=6b*_V^ZgChR!?eyq zn1rcH8gWy<+J>$|h-C_6>~o+B7gLUliXG$N=p<81PpZqw0!}*iV~ROeRmqf7z-8j+ zm&T{h+w<|7An=FE#Agi@NPvrXaX*yZTa@FMBbVGVf>n?Xcr2oh6yaA=j!pjh$&84R za`8_OHG#;5qC(q}qOm0c8j3@jiFMYq?5LAkA$$7{&X@(rrG2s?cu7|pS}yv=wt-_> zCNslY9a?OQlAkPxY+NGk?T^%V$9*>qU$%$hX98R+TVtU#GWUsT_Z9Vfy2G+jZw{!* z{}E?mnM~HV3)vtyuqp6q>xJS{G8{25zR)N!?a~X5=HEcm?B>^7b~V2e|&qU7)saDuU(ZESOaxxd|> z!)W<}r0RRZ)g2^ZxEje&$d0C)O%f*Z{IFal$xBr&ztqNPQYLP00`mq}0i%7^wL`ep zR^Y9dzX@fP{9Q&irkLlzU|8l>4QAvTedFcMQK)odkiKTtkHnuk5c4;d(=%z-4dVhb zSf%TC8UkaA(|AFywj_B#Sv1|d-)Pk8S+12tvCXukq7_K{>P}DB89UL>fV-J8I9r!p zCf4a2uFi4(zAjbXvFwl~DzZFG2 ztjZ5B88)J=-*Pq(!~-a;&4;Zp^5dDc8$~CfG|nd8J(zD`Lys3kWO)(S^o0U--c5tc z4e{vqk;3vZJSiC&+r>*zk)BL+nuJuHlj%BPTpoR*PN-q440s26RU)tQx&5rGIpiu^ zPOKs>Hka?Vleq5@@no+`sswj`c|2qBP}zz+fpY0lI#jeJ!XVDa89rrQ@A%^_^Kf>y z%H z)K00wDul(XrPy0G4A^69T0*4kbkHxsn(4>e5GlS4FL<~ zswQFk4f3nA_H>Bu_ndQk%M{v+CuthLaj{3$zpa0iY<_Pp_Tt|jBcjKbpFW>fH7=08 z)dqK4L1}Lhp)@!?@r%h@IQ>jLbHY{l`6L&LkVC8t7! 
zJfjg>vp=khnf8g+Q_t2i!p$q zj*-T@TKm#CnByvEC99XD3#tv%&_EIMY4z{6&kR})o-)?{Y=raQ6XFWPKZS#+Sj}R; zI~8o|%9WbXW+>EMSTn{1$}QbIXC{k%oKxw#t+eO$_&WA%HL_P=gO-|tzwJCDT+Z%W z!a6^2(Xh7X4Hf|JTQRA2iFjlSLg^nL12$?)+JJHf(MRF7`l8B{_ zFKK4)T@0V@zV+83$N97m)+k~UD-#(%L16};zc)0(;ZcUCl zB(wO(!nOMM{1>oWJ^m9*{l8(i{s#+}=~OK}Gd;!H`kQ{dQDXW|a$0(V>RGb3{!Us- zyl!t#PtVi#+1-*atwpP%bCWYcF=Drw@pp5evH2HSoeA^<Nn)jbi?kK7Mq8@%d)tX0@3=EnAh*CUGDa^~jTEc-D%BMesnNnN?&}7-+f-Cl;?BZY#4i87W08)_|NRL>= zJk|fr-x+@surT)MMW$Xb`m#QemXMQzf^?hz9g9kGK@`8kc92p+|3}W-dyJB~|K*(jzoMq>-OT^5IY0XEIX_D=O4K|1$66kN_(D9X82g@LntaNk z#XoG)cEum^--^UPW5rGBe+l^J#FcIjHF^*8NCNV0i-Y8kw!fY)R)-TSgAKXR0AwujxcY zs2=!QXM=Cxw-=V|*XOg9gG7G(>XZ1z@a+M3ni+|UP2E1 zr)&GlQ1zaR?`=4%L4_f246$UaE`JO^Q6rx0Mt^Ikz4M*KpE&Y1dtW=t0tz#a_>V2J zas#=UM+=Q74GSa1AKiKsG?YIh;#GM4v+3XeLIgo*CX?WjaIFRtsnId`nypCDbYv)&5~19i&n;-zLr#J*et`h`=-2daz z8p*qw0qg;sEJiF$=0DB;Pse=T>8n5Y*!t*ec=>pkfJz#4B?C`7@J$?^btsHprw!+ciwL`0d{BN;qyDOC zYY+5leW2%5pk$u!(tn=~2`TT+Yx(5iQvEoz)SRy&cJHjU|M7S^Aj zEZLWw_a`B;zazP~9n(U##)P3F>vgNEtmgM< zs$k8$x9|iE&bZuPu~|?olLVl%xsg^i_ELy7?&|HMdUt8NBV(A{Alj#g-D)$IMxBr) z`hkwEZaoIJ^?=y?+CK$tv}?w*=YIEm2iq*fVhVqTPR7*mq6XJ2HvEZ>48~5*9WF!~ z>a+1LdzQW8v-)`#mXJS%+BV&z=QZ_1ctX)}f>c*x6fM@G6Kru3N~`D@@6!4Jl*24z zi2~E%Xc#N)X~z)yxB7D`>oArQMMpE<=CQ54HX0YkDUPW&10eZclbpR1Q7-63jJB9`8x_hyCb?ZVH&`|OWk41 z3pi8tR!nt>JDwKAyUnCC<<3}pN)d$VYCHjw&KAp@jV+F@I&Aa~H#RJocekeQBWLJ= z`L=Bl{Nq5aBu#heHksm`(A*Ts>FBH-E1?hvi&uNDNhi%`pX6^7kvy79$Pf9ECU8yw_jHVnYY+^ z6*vVQ(yMFF8{fSLSFk|XO(iOq$3HxCdH{9m$qo0%vf;$&iU}MX-f_$oNHHLbc^<5P z1S+-QRWW{S+fMLb4Groop7EMG;{(t14Qw3aJcRjg;G339bIz{xv7Jt7clRX&Rkc$l zoS5EzksOvLk9%jhPN2ARmp0#ZX9>L>!`nWX&yvfVb4QIgAl!P zw+($|ei73j_uJ<&P?PyseWwoA;XvKN8&u%>Ng!fDy@Ea9V}=TIOu@HZtN%k-KA7<1 z#w$mT@nsDYwzJNM7%J~}gs2!$<&^Q*zXpESVJe!VI37|-7ZhV&qHHGc^Q7K{wrLog z9vzx)Lv7byWoS|IOF0XaaBH_)Zr6EckR-K6n&3|DKBKU0U|Gce8G22!DmONMF3~U{ z9M)_jW`lo;Hp%`A8!R|H(8DWz{II6uh9QyuBHL8eZ;YJG5nbRxT?yOYFuo=1cPEO) zG8TB%4wN8(z1IO+Y$I@_UZ{pCh$kF#$dRAH`e09ki&>z3u~#0y&a-FU-q>K=+#h2* 
zMi>yViTodCuN{N#*1{qER3SLDl)E<%;CV_q#J)k9yoS zhHwX|Z||i{Q0h`t;(c_tcqK#0rq}`6lHX>3zVF)Zr~a=?-PmP zY)jRr!l9j*lb-NKobsC`__?MP$fgTB{P|}^OQX3m8U4{zX{FIVCzqlbe@edATws7N znbu{OiLox|D|{SSqR@(HhnQq*PW4P(hr1J3!L3-x2kY~P6p*`Q48fnKnC1@T*(j=$ z&(EFp&8C+#!hL{1j}RGDy@I&bLp{`;F~A7rPyzwop_KE(NHS`J>ma34ib}TKRnal|W= zg|~ggCNI!av}QBUHflu4Lo=kE7{I{weTS=gKc&v{2trpF zTq*L$7;gTP&1X$rXANidGgD#WgI^bR6=OcfZE$UuxpeY?~#3*=AAatTWi7Sg~$przAn+9jQ8 z;LoZn#f*m2lq(jsA_)MYM$INy-4gDdh;MMhonap?9GjRn4CAeZoAoP|69oqJ9H8ce z+GgtSSB3CIA@M9rN%CuBn1;^L#0q(&OHh4xn;U#$h*kqBHlQ6;Vc>_6$~u?`fdv-cX@V^)&;UA=htNf zO!{T%^&sSLC@fwPZp$5L6Yucs*<|J%Qm9r$u@4YmzQZ%9zVqM#|9Ib`5+2Z*n}wF= z)BMdg#yT|Usq7RdK2l(jE~cf;GgW_IG9b)3TD`;2F);Ze&)%9` z{L&@66}e>QVi1sDlad3YOWF`f;4>8#;W=Zqchd9US*^J;C`C>H{EI_r@ei3TCGNI8R|V+hP6<}fq;Hr(m}<P;j zb)P_&Q^ES(a0DcA`Mw^9+V){06pZF)$1m5 zAUDnJ^9k|dY<9oPZVftB;M1Mmg(}uO9B|U2fQ3_Ch1%fc#8Z??=^hI!28zig@0ptm zeBxFI9`cD`$Wl`Kn<5K611Num7eW3L_72Nl6m@ZhNyaJwDC>Z@*f1LGLhHNZ0@i#Y z@KLC^maaFP^+8SJwimE1E+V?5Zgp0n?_Ys2JeTeAeJcR&khCk(#d8N=P_2V`?&eUa zQdW)C9HU}X=scR@$~U88+>s@MiK=~p8JBs3s0&z^olJ6#U~U<&5<0&AH|n1p11FAW z{8S_gBQ*i!VjzP3e|wVY_U;H2bx6)={Qk~y{0YRfU+{q?6w0m@2f519b2=sywS}u z{ZaCbyAm3o91%y1w41{`PpmxWUW7VTMB<}arJaU{b<(Ms^YBtE!6zpxqk9aH;DpuV z5@#>r>Bqllwub+zrZ$Xs|6_HTrYNSaZ;vuDp+9La^tcYTBaSB=YVd=G_WfU6t4(@u zlyO!^(y1V8&iXHNO#{sQqjJ}D)7oXOfCw$@&_kf&5sZ161ZZAB?|=vEK0m#EhsY+y zIk?ad1{bUUoQ^*I$NhqiL=#^|{UtmKOE^N1 zXG^%!k`9>5LXSPAIZ}p?^US(_XaFGR`H0%Cuymo4)l1k(O3N2W+2-?|iI134E~OQ$ zlGtr6!J-pbrHUOylsT9%$T3RnxV-5^P+56^1y{HO;R8^FwEp71!8~2^NdwJNa0Q{& zX}>dy2f!HIRkhNb0q#thhUY82PHAg_2*saZt;bfXy#2UG#zd?c@_=p^_*Kp)orGJ4 zcXoz*Y)pdO)H$>`W3W0+2XRa?ZX_K1;g+(?`t?a|@V{dBb&peMEwn#B+6K+!%IuM& z6H8fi{SPtt_Y{>DT|+ueAxSAr-+&e(b}4OUL2HCX_oS6pDNRD+o7(Z?r`Jb#m7xw< zTCV{0b9s_n)!bW@I%^yDatW6xM8%?%xi*%p^v?fSKuoIdoh!;!U_>N(0><92F0G}t zM*mp_?vHl_NT?Kx?pfI`K_n8HBo{qnBFYoALT!#0688!QG z&@I-5ohGZ2?zAITkiv|HS1#)j_#`8+1Yw9MUv^e^7gK!pU*bFBSKs;{F^Z5}3CUJk_VHYL|GZN|D+-`a|bw zADy%-Nd1GhKB>mf#y_V~*_XH$?&~qiv)JK}h!3ZU%-za(_y&?s+o62nQQUH!#=y7A 
z4c$@NuS#2zJG*>=0O2m}RFiauO9W3@o~;+U13etP=v+~?abhrT=5B!qT4$oSvXW=A zRI}bfkbFaiB5J)W-C>%W!934IHjnHzJvV^;=Os%jJLU{kZAEIyUHc!C8-^x^6W{6o zsNVi?Bl!618tHa+>Jf&ukAK~vrEzq@fchDn%+`r3j=iY1SDO`B(>3O)X~B+HI=wR} z&7zW1(6XlUk-@LaM$74fu;g>ji#<)yxtnCLsHm{pI5M>KODapVl)-LhvnUGN@b@)4 zqKjV52d57)9e&})>RjRX(Df8zMKqz3dWQ^eN@cB61^*oM4b@SE-?+bi681B01>0Ql zr<;{wUYbOKhIWw4X6mgRGO};0+T*TeaH@+9;`!G4gdjy@GF2ZusGO?{& zBTcKkFbIx%(XdOT*11ymy%_Y8ow8`x)KkDK`Xi^C#aW`^xI`(t@eHz?eh9r%hS!^> zdQq1x7W{`z5Gh|f_ru&@vzim+Ez!?l`3LKc;4b>ZbC}pS3x#!9uGDfz0aNj?g|8;B zPej%)loqM@S8@w(#_K>O=E!*aBxMonCJwRbCD|o&j%anoJr0qTZ=k>~voihug zoNx;!8gmY~zB@Xr>hji0>{ucaO4c2~-^bh~H(<7jmLdlMq+8;BvoDMxofc4F{I=#v z)gukFC5Id~>c3Y1D{h>T)+_#>55jVA{)Q7gGRr3(;ys-tX(T>z;Fyw6_&Pw5bbY_0(lXeJO#KK|MeBw`he=e5Q zHAAy0w)|OD?sM(z=u&4HhL!D;*5XlLb6qZ~nyi+#0P|~1rg&n4)@rlRb<%^o1tPUF z=|5g6J9l$x%uFC1OTjHu`nQ5p*N}0`_}43&94eh+XsL*vc$fB(+*PQwnp0q!+`=^% z@vf-RJk9<0>OG5kn3e{jAlP}?u|1+fy-SO4vBD|zU!Ax5^z4#R_k;@P7VS#}#>=xr z1rYjpgm*wYgrp&1d>vUM3?>2DkQ2|@Nns`@ElQ9^NJ{nNCmE@uqN}u zt&=aeB9>fb?jo%t@0>>tX1O>;DyP(~p8;CGQTHhA?QQgh;-$DGN3DM|E2znoP3dxm ze7Qn61e;7d6g#SUJ?1vD&%mi9zhclDh81Sh!e%unJ_y-x%cLD*ZMy5WB1#)I%P1;6 zcFH))qF-(PE?BELGfB=9I!5l5X+;8q1uc-xq(slPxqfrUYdOQ+|4Tuw307MI-*Zjn>XZfF~=4%g$F?8>=-mxf(?+ z`i#oWk#$tHc!?_+w^oQ+fd!%I11)(yi|g;wnRt7yvKoZE_ETxS3pq%@Kq8Hmp@5d41$QkQ3>U-)HoLG|Dw_DhQC)Zl`|pM&grPU;%^2qgF;7 zehcCd8(xO+^|6x}+TzF+RmWaPkSbVCsc~>Sy0Qgao=@nEQgEI;h)|!jYxbj;>Z<<0 zHLy80DvvNU1BQ3?CDkLnao2RhcThBvV!>|4lc!!1QeiYHI3$lMPY`f?b?J&+PT2=z zo~wl|<<2P{B(s%)?yT&Tw$D~zEx$b{^N`hPCdDQu4JlzfXJ6q_4PYt{>za`hT^Nm_ zJ2&uQU9`Orz?;qMmD?+tOQSpxj*ky_$!MY*O-9gnu^ za@BdaI13B>WvI3FmfevFT8dYSQU&E0n;s_MGM}%*@=F^IFwLjx6ycz&@BVb3`1~8W zb7dXh9-LKRl9oN+_-RauOm~@iJq!!3JboB;LHiSey2J#R*mq2-;^c{ zErX+LqyO&tw~{C@=IlU)`CtLx%D#UM`$T_Df;mc77nd&S#mux;)`~TvN?%%MX@_`EmNsgctTXl=<7U+G_*;a~5pIFGhIHFMX+6)O9jpcY#blJ1nW zQ9Z_Dl1)k3gRqDFL>U2g)=mq3MAx5+cvk|Qs&u;I(gPL=Ab@#pJNk6dhBz0AM;P`P zV!X+R`g-C%Yo*0O>VH`AvH1+%G#7k%H$AR%UZZpHA5dQ;DAi4(2=W)!43LghIR=@- 
zt#ppUG~nd^=TJKwX~_`tNJ{b!E=oaB@@j{xukVgoT|b4?k27+g#1r#Eu_S!zyZ? zo<0_U?fj44+Q=_%RnA~?-MkvwSKcH>KJ@D~RC4Yyw|X$M>6JE3>qw2T8cJj zsyf|I$Ga#^yo7C4d;;addmz|~rbJ!ZEz)Se%z^#l=rW#W_!R`ZM8HfOd`?gpWSR!s za%KytcD~BxWZodk)4RgUFD+R7J}lOirCDjNaKz&Q!bdZ6)*R)nM-ao&o0 zl@bBc@=s6-9KW1oF%t$ZONIUYOz-s#b^(a+sdV1qxwy|yaE$1{fh3E> zP7CBKx8u%B`XkRIrsBOztC1ebX%6AbtQ09}$E7+)4 zq1?}Sb+C~v5Z7G8ycD&6J~Pux&Zl#rX4yEYp1i~h^qG^=im%W3yZUVV!h==(3=-r2D!Y0)bM=isO7RHPvtVS!9T$MAHxP_kc3kVLOo^CfG zt%G7}1$IIcO()t7M=ZT`GA{^Wgv%@nLRAgys!~SLtM++A*ZkP>n}W@e1Ewd&x4k0@ zt{K`BQ@gq4xf$eAfHYBS>XW5Hd*UKKYSPsO>eO};kG84&T|xD~0Jlkuku&#XQ+!kZ z`AScrkxcB8;4z}Ys(jZ5e!0b>9UM{5_Vuca&~yU7UOcYVuW>aw2En}6ZLLTT-wV;^ z*-fH5#0$#ymug)yd6QTY^oYrhWSmqs!*MwBs9nc8N_#t&Bzc6@>_C;&e?6By1}M&P1sX&>}nImy_cYGRh3 z_$CqEs?B;xLjlyj*jdJM8=y$t+5PV;E%d+Z_|ko2EZX}U%%z0B3p#QiB~~re4^T}q zow8P6LS`03Q8}^OFi5-XQ0<;VSK)kAMt|h9_30x_ZjjeXO@!wHtDz{O;vI3QmqQQz z^!ok_&nExozp=v^6UO~y7+`bTTah zOY?&eNhRW1B~}zsZ(Kpf5eRkO1rmw&$H>t}@-O0T=(F!3UFTP);`X*uW}tHf@2Zmy zmSpawX$m(Jy&7DRVXwt&05P_w0~esJ&lDL`dP)bcOZM3HH_x7-ZTh)tH+&N)I^C8cv3 z%Fo5c(9)&-OH*ZPB#jDuqI^D02mlsST)e`rVKln2V$4uAX7Bwmm{06K&ymN5cp z)Ro5lK->dk@~jG7mO<>k-(qMbI+~G+md5&zO>|oWC`78SXx4o6WZErLiOEao)nGkb zeniq|XmRw7W-2R?R=9_i-!UNRVy{mdhq|XI6(ecn?0(3fva{I|iz<=KtH@`epO@s} zo1Ok4EryL9rP4QRN7uY7CY1K5RVknQo|I6Aq$ZeOZRs11Ry*I=J2h^j%H7XYKY|w{ z%<5JL;*2CyUsc9Xzm!xd{lz7l5Si-EHSv)z8sPtDZe_fj;D`G#RC7)lKHmc~?xpYZ z!%@$h*Q5P)kJo3(58cXB)8MwW0lgKR!#$s482=91Dhx6PxPw z%;jdS8|H25xx}f@_EY}FqAHfG7ClXP#K8ymFY+~&O=rr->LA~Rbi)SgChy4(G+S@K z`H;M6u#LUfWmi7C?w0b!67KfJqyod8?Q1ba)o==0yzENg<8Nq*UmfFHG|3^t(l=de z(+KtAlp=+FY1;tVlxxpQ-arNd({=$0G7iTa$+L#hTO5V2x;2VR9u`<mL!KoRTEx6wX)=;$vsDpw83xg;~700^)~A#aKNetYq+d-elPFr+FPoM zX5|t{;RwOLxy>4uKADenf00Xlh;lbhf~cc(o@B{n@#4Z|JjT`!bS(Os$w6}-V%Ofzwvo~_BI&hSk@ctivyAfY2ylwQsC zGk%gkf6b6O^IB(C&YQWDuU9pNRMKV}RaE)}nHlKX*ACC4#tR<%Yjs7NCDt1%pXHU! 
zM{gl#zu0X_u4k_{+I~!49jwmF&z~M1s}ZD{H2ZnS(($BziU08h{SSo+|pf1xRS|DP3B6@cgP*( z;m*!wdQ~(9dwhpHlS~%+YHGgQCBOHtH8Oh0Z1H8ie5@oqe%0uFeQLN8{Fo>~Gi69~ zuB6?cwjw!QJhXlKrR^eZcv=#iotbC4?wRuTOXjqhar*7f`LUUitUj;kb>;JS=7+*rJ=*;G1 z!04-I45lE5n){DjI{XbfL1sU3#5R|ST;E}CTwryhgW~o-; z*r>!v-B44i4Fk3#a{7LF_$XA%D@HZIanTYgWsS^O+V8bF&G@+x2FkWzOKnTM+yDL4 zXhwVJ+t0aO8qQveI0TBm=kC2e!Qsh<3#>$oqt(Q|sjmq$=nr}Z-JjeDc++Ql?9lte z1pB^YQM#%30g*~pwFAT*VCf}mok7mBr{ievNuR`q2MY1%<}&iwt}Hb#-@0Sed{*+4 z!Gm~QYiUbQ0KE_MpEi3qSr09WozX_`;5bV*uii!t_?l0?2e|$9N$c&BlGT4G*?mRE z^}DYFPd$S;rhQg}aPAorDV-K4?UM7`=qcVoq3Oi3PpBC4Mu`|7|8^|mNf1-gR7Dk6 zzk(*|9n@-oq2h~ekeTs1zH3LX1YW(1$rz0*;T=Bies!~a<02}LNkS}Qg{sS(I^5Ai zT(8-!z`>IRCv92l?ZC%j_cg0%-hlMVz*7A;&K2ll65}d1Hyr;=6OF0YiL83S1ef20 zG>LmH)q2EY+o6j!Qyu$3|9IsNIFJD{y3*6X=YVFRYESAniAe#U9n$=t*J?Ux`wH5( zO@wtotvJd?Zc}vr8f}$UUR;CfxJ2#?&R*T>cJW37ZjVUst_DBQ2ynTjSjME2ud4$w zws1CPUa)K+(Y;I7!19a5)5V>0md~<>*WUVV_67ImYXXBV^H&lnoF}?E%8vVAPlMm&*8qk{E@VunQ6+w@(y35zTZ+cn??A*5wy%*LRvSlnFI-X91~y@ zv(Ne^ODVsb0M{euhRNrp)ChoP5z$J`e&!D5wXR&j3)D~A49!|}B;I#WYA&1W6G_kJ z{1l&GbBv?Qch4$u(-f%N3V;Oy*+T`93V#*c^SkzG?(Fv{n%O;o-GS_Re~r~sZ*-T? 
zw2`fuaVdefIOp0d(At%^>=7vVi_i+dd(2k491=-Km8I?4(&*LYYfZHL81jR&CzOxh z*YHNyV`SW+ifxXA%ZAb2eOp6ZGq*k)LN<52egV099AAyIr|;ep(9pGBrfs+$3TOo0 z6!3)JoXARAh215RpL01nqfZdCov=BVNc}Xr;?7q=Wv&qZ$Ve^71i{YkuR9K z*V?HDB!8dFN%rVp^TI%XZfhDY+&}d5Ae{>F&C;&`I${ICoNJCGL8pwGRf~;Lj2!pK z#WHVcanRtJ7-TPULfd5WH2>6;pxJn{!U-PW(5;pK{&i#eKA@|ek{-8D{*XJ=LdkOC zf$WqLWjFHE=U6v$cQRP5gLd-`e(I{v4~s+uyIKG5 z7=oA-!8#WuP+x0##`)VplXm>=HL$bSvL})eZAc;A<;iDXu{?tu z^Ifix>?u`V7lr#EmV=z=We>CIu9?P$anCS_!Bdc~nc`%Td+#5UgE^Wt8~8{1UBUk6 zGxSCL@)KIvBC)^iJzdY0TO^uF{@j1_fS&; zzexLQQ{a~rE_Yl%w3?xR8-h6oalYeUvKTI*-%^7d2u|1-?D(*FDo$$?souUa^-Nk# zHF%e6pH#mMEf3tA>7JWE0gzI}uFOfu00OHSMBG8iv1ozY{zE&sWE(ab*(hYEplWAjTxErr(EnjdWNF^ql#YejXTtNBP9-z#{KTXua) z0k=%4y3IdK76e;VRn!btv3jP!Ek?7u)6do+B!8jbxVWv^zjKjZnGH%;6u$9M4)9-J zaU(C*YYHO8j@B&p|J5+zw|G~8`DX{(DPzE$#PT#aTQGxgN`LXLx-)unuz9!?txR96 z6pZ!2Sy_O7cY>0;B_;O2jURdA*=|L-$g51xz(Ude=rdc~u+>z*Pj1QGR9=kr8<1&duhObZv6Y?Y zuk_jV;Xn4vv0B8;$-l%*T5;JH8BMtOdHorcB}5Wne9lG|Old#Mr8kYeJt;;6nb{bgbNM;YxoC&7)N3 zg8c^Fc^={8im|G#(~fqNeg*(eGy(^COOtyeDQ|qI5uA(;!`;+)ah(Kp zQ}(soJHRI>2ELHtb>fI;4)9rwqomv%+RcWpE7nJ9stY2vzHy~?kzQl$qKrV;y4gcwSwZ#SMVv@p>2>$*;TG6Xo~X#=WkWK7Ky+0= zcI3|$Zyk}+u?j=1qw&<~)ASl%i0eDD)wf$BORf2`tf_OO;ulFpa7$9-b%JkbsCo0_ zz`ZqvHsM(vhl1qKTv1Jzc%r^+ot4}xrvBAakX&6(9kw?FU;1Lb<_Tl$E8|Ns8+^C0Ynv)e>h$_bU%VKF? 
zR0#DLZVIJXt}02n(w=nn z;}@6(@Rv;-2pj4t-o&Q#-S&wn(q~csrP0^cljbESmy%Am`VB+I<@(8G+!k?`9y17$ zRYuM+V(H-U0DVUF1C4Oflhw7TCiC!cb_E6e=eXlC8G&*a`<*f z71;@auZt4*xt9N(9%=ZBSe#fm3-nkd%hDTZC!%Bbj@yVwb+AttM$VMP(#(qyrsb-w ze96POz@*=tMc-JkL98FN#)K{SL2nkHrequSt7Myuas65c22cP!cT3eawaD^MxuWsb zPr&6jno63@@JX~D!C*(@mJZL6;r+4exOd1%=T>jvBKfUe>*UEQ2vD#@35*@)#E_yR zSV%8T|MZV;M!eL)vS(CKo-#mkEXoyhd3JIrAKouT0aVdHqX&!(6U(f@EfVwJ6EV1n z2|3;iSjl29-g1Q#%)`^77`;gBipf7cZNk$JQBvb&D&T~9vWP@Rqk|`IBILvp#iAo+ zE1Y@|vOn?jj3Zd>bn_2RP{672vL^!l%*%cq%(GLT9`}_HzS~08`xN0n3)% zd$xm4 zg%HX(-Lp;bI^DqoLq=1)&^&n({Iqyu7ajKv+Co{CMM=ged6(Rup2{Uc+8~xGC5a9P zB9!)5GdsO@>)IlK&MrP(qLaP(&przkfiYIO1*K2~Hgdwh>KQ|>QNun;OI;+){N}_) z*``r#wO#q_AvNj}TMDr$mDF-?7t2vKiUV4f){InQ0m6N+JZd1fz}$70#3pT`<1T~| zt=&4|TvluM;@qh(X%U8VX8GOCoI zUKh9hVq|)}t7-EK(=ewn0#sS(qehd0WW7%vg%I+DZ6=eTo0mk56Kns+~OMgA^P4iWXp;J#`xywd!VT zSwaRGsoB{;(GDy_YC-1A)WavKYYHYU7Cxj>D8W@jH^2E190H`G)Im;pS}8RAcSrgE4=vz^Ik4y&r$TU8cDwod0AX z5kSj1WoJUC#3Tmc4h$%}pS^L6$YxTfUYUp+PZ7|r;^=4;^F*v!D{YXGydc8Fdkl~^wKoSg5T2noq#C0 zSlZ3104rdMjOn2Ksc;b_G$DoBl{s*qU)yQsaI1{CmG9nv zo0=qEw`=9#Ft__;igpYw;Wp6{M-yh}VwP#qQc)z=o{!uV@k%oDX8(6Atu9Z`o8t!M z)NZQJHi9SSyIp$d(da@b@z}yy&ZBvz_)y|=>;Q_Rm#YEE?Y)3I;8MYz9nhPx^aic- zN1JAmi8rV`f6oeCl>4^*qGuIzO z2Ab+jgWf;$*dm73cE{-BE*$iS?c>Yw`o~)0hCVPeG#-rRv6}&%CKx5DN%s^cl?v;G zpL2Ldlb7q7iQ!x&cqqCjSAz^Yd=Xt7$S8S_CA~4d!?h$(Bdm>Vf-MQZY zYgFk=`M`y=R=F41?7he-gy|p6c^ykkY#Tv%b*VWY*t!DLO7pi1s<+y)nY?&!z9WJ= zLra5TR(X5nYvFSI1YH;B`Y6uRmJ#rPqCN*Q`l5|`BxY*m4smq(=+B1tbEfiL`92B_N-M_hU2uF)R2EQ8xd9oHN`Yy=~zfkWv79OLTW zi50?{O_2Odr0?rv>#FPNrW#Ju60*bk%agjho0oKJc}k@=LqNiK=vo+j8$D(uwQ4*e z;>+>2oG$fOh7MGp@Mt|tCi1*6SCqbg7L?7FT5bkfKrU1Cu5L<~o|Xr|+8SS^4D)Z| z+HWj}=(rHiv8(to2BeW4mc2i;$Og~6vF!i|qSuEUo@DsC&VU=`fZ9dD`MtcM?b72a zJy4{r=UL!*jON>Cwm`f3-ZE~2YftQO7=M-DK;-Nv(K$U(QWvd4n~()oF2Zx4Tu;so z&>qI$MR#wy39x{l8`FE$T#03n^j;h~=g{h(2d!C8bhbYQL+(~oE8IMoK46RICrbE# zU)8F0X~r9S4BXf<{3Wy@{*0(31Ha|I)P-e^W1#G^<<#C=oAM_q0g*a(MtekG_+>MS zzcoA{c#W+&(^XuF82V(TeFoYdj*~oTXYOR9Jl9!rpw)BqO;Rnj@6Oxj 
z1P6);g7Zo0f&i*2Fo;S`JNmV1HB?7F1j9S0WD$N--M_JoC$lyi^)2#h2>ek`*QOtna$xu4% z1M1;*Qh!-&D#X^EVGoe-pMZp=(c zz8?QGtbc}|Fh}cBU;uV<7r>tZPRdg&RX;>g3B1*mW;*CQSgY5F5dl&0z1&P-zYzfH zC#T)KLrneMhrxxw-X^0a%jC-|hk!2skd}C>v_MjwewMYaKRtD#KRs^l7cxtWfFiD{ zBZ}(0uxd>n^Va4KzsE^36k+CcR3^HH1qF7uU`z6-e-7U3wVOj za8TH~sG0;_fDv#jN1Bhil{zFTLYCt9PCC8y;yl)vf|6SmX4XD{$W}o+y~d~Q>&hQ& zlB|NW(FSY6&2)@B_8!2+g$kV;l!U~bbuO?B(7<{@ThFB(d8+YW^WuSop-`1+%wLJq zqbC2_rxGLE2?j3j4Td1OrOs*;365rnE9qo7m6oaa5ck{TPSf;_WM@*V^p)E?{+fwl z^x`f39mL*<)@yb%{f+U#X>Ap|JtY&AZVB&0!#$uT^%;XkmLr^;x|D#2S@DG6HT8nz z-KUOQTEEAKPNJ65&3c9>(aP11Yi;BgVGaxh-v+f6kUsna*L-8tZIp80Kv-x*Gnmy- zKO+`_BOcNH48LZ{4@&omx_w8hEyc99Ygj7sSSyvXFjG`|Q)L!-&A?!u-dZN7)!U6vJm$3FgUz~2dRo>UN#R@i%FqFP={gAAw`v@TlwX?jfuQ80N z($;UK?I6{w3uH}mvgA5FM1M#7Ngx!R4?-WT9~Vu|YuQ;WwEe*MejCJukyjyIOEZ*_ zeK=xU2cQ4GKRc&M@2e~OgvIi4f!P0zenoe7&5|~AnpmBNIS_qfawg|pOABXs|89k%wHOJH#M<#5{$&cn^V z$$%S)q=p(3O7fBa&1! z%!|&A+7=}p67J!PHksvim?X0acQ;jibY2Gl+(&=sy+nLy(tC*=BFrC!(~B+mLP5t0 zM*geaziV~@0HkyWLDI&S8+@5wnu?2~EE@YP^b57c&?mCgObGh?9GJB>8li+AnWt3RYK}im#8c9;)u+#9lih`8-M(cB^A8h zUkkKYVyG*}!^B#(r~fF44|0GrsB9JQnCfIF;=o1>%vQOSG>jg8YGW8ZzfqWK1fRF9 zG~D%_Y_VsTAXjPhNS5pamvrd|Ls=T&7`eIn`QPEy&R=mCdq~bwpQK|Hl8Wc17a;^m zDbQ0rgaYWQIfJBJn>egas0g^ES9$as{%hasaMKHWAR?25(hscm)cOYI7ILxmp9e5u zy`@6cntoVZphHta!{mG32| zNT+D$hJRu=@e7#X?ZxjQpgOJ{-*zywf43bT;LkXrb9JyDB&=9Nx#51jb@%suyLa}_ zz{CFV{QbvN$2^S+w@uYI#mB=&h>Mqwi2@Qvo@ffDhO-La8qS<*r+r`gdw+8%1%}l8(asE`%!@dW^V+m-OoMnBry&&Er z<_slF?8pB%c%daUnny2y@GMyX;STI}IMaF80$u90m(f`!b1+_?`BNN2R(WI_yab<9 zzSWHaN$Q6AXdmY038o=)x--cN^)jvIzJ5qvL4fM;%zn1OQ{zg7g@fg^Zs(dDqj?+Z zPLA4>_~D1UK(rwR2-5EQwj*oR>X3Y9Wzi- z5w9K~u8dw^&Ji0-9aBP+{z5jpGt4$89Yu~T$sCJpkq+|!dM{da*Xx4H6uCJtp)#St zqt4tkZQwjR)h6yaZ^x03p7U=m!rjDcJ+kO!l=`>zcrnfZmg=;FCfpAVCWw^XcFZ09 z2=I6QwXjx~L$gY0x{=QtPKT%?NKy9BA)OOA(^&FJYHIjbdk%Z zoG}r~XG+AGLF^-)B~P%241%b%?_VJI%w-v>^Pbc3udE$M!#;s%*V?Upr(g2W-P^If z@dDA^E=FzyR|QsvYYcv0l%yfg)ux};80#o;hkF+-cxFG=GZEFdBziU@Id$$4k}oX_ 
zBv|`DYoQ|QGuM$c!(zyDU4qmJ_Qxj*Mynv1xTp=%&&=eUi>K==qn}2iay#CU;)K&~ z^VPU#KPzWA?()j3{?$AuS?8=}=0e99yjGI5T9@CYa9ga%q1Hr`X?M}PNSe0lJ#O%u z?8%Nip0pKqzpcg0(tURt&i~lG@y@LO{lea2dZ?S{p=PC1rT=IDOnpgL(&-7GF~I{4 zNn%3oJ}vR?zoKJDm-#-OyfyPUf=W1#{nBg?t%CYKCZ80v{9S$~;&1Dvki(_%yG#$) z5Q%o}mBD)aK3qk&Y4&G$wLf9Bw|a+8`#IPT^dGw+odh8j3u8?T%%Uk!*Xeq<PCb)TvOra{N&)@{nR#3 z9Rw_yv=VcS3~4Souu$(F@;&0RZEJVl*T{EoWiV&$W=mVfA^zLLP&MmlAQ=0D&>b7g zI{%wlvW_R&(j4ue9I;296_+D@n7Y zQ5&$yDS7eHq-s%2wH&R|jY1N*Q9Untvo#49LC6J5P5qjL zQTSZz36kV5L79hlNK2zVcp1bY!#T+ff5@wgxs<4vXQAB!(B|uN%$GLLYXC(@6vL~q z^sm2yrV*0CHaENkMvre2)A@h&vUnJYb4w=v^rIn8=84WIT`r%CS*|{Zg&s%!3P~=x z!X$s#dXQgB%(%;M_$;A4W{~s#Eq*TaSSzKjEg-xb@#K@O8Z-?Fn>@mTa9u@=K*&49 z+iu_+RK;ZZw3&ANT+chnw;~90SO$~d-?HJ3Eb$iwjwZE#T}T>c0N!zei%fqf_|0-# zU*xIkEYio&U9Os52J8U3*~iZl0O9faXB|lY(u%zFAusW63ENH$=Y1)5;H@yIPvl7t zG^^8BP>y1%22f#?epk*zajb_CneI}VP+c$V2@?@rbQHaM``_<$Sm=J^-%H_rp~%xP zFB7IZl-^kZ`;jqgS#!pmBIM>sA4^0K$K=qHxNF$s-^^c;Smu)-0j3YL$@VNKco{~F zOY~uQJX#^l6KFDfjl7uf7w8YUMp4+&%%*aSjDrBY$64&2J58I)2yTYUqVzhU(K%Ot zIP>5zV) zdSL}|v57lQk=H;)>yfra)C$hhVd$ou+XuB*Ust17m6t_M@nFRqAc+{qtf;>-@gUkk zt(W1;{ZVkH9{CA*Kd3mTKj2q74o|Jd-Tnl^3vt4s{m$_1zyFjm|35QG{_kv`i_~=_zHqf+w$Xjo zG!w^+QOdN9n#5Prgb9;`3Cgt5_iWi8iLWl`=gmdV%MlfWaDzT^IE?j-i^N*66Udv=Kcif=|FFpAW$-h`^CI_ge?k-pc;sundW3nesQ_ zTb%vBu*96?;5t25vNY`5y2cjw`*g!g?-Wy!zFpVOQ_KzAl+4caPUz|9y&tS8s+sQ8 zLR7N6lh?GW#{4D~BQ+U0yS9dq>ij{#vb;eJu*_A#6HyH#LHO8f0(<5UZJ8XNLUJdA zNHnAaLs44mXiuF|k@Q>Ha1Lp*bz2hgs)vn=*`3kR_h>%dfClJSw{YM0r~QAjKv(M1 zpZIVu${BqH>BT13B^Rs-jLM2Rmst3iR2MDrNY0=y3-MCPbDtg=x1K$~%2DVmL zXFBTE7hL^gd?eR}gsOuh7gt&)`qP35+YVU4s&Q?r$%K>xKdEZb@z{Y%Q7@Zsj%ci? 
zDpv9-Rh`Xt@>6$KRftQJVrp0WE;ySqf03_rdnFXt%+I-?4NqLLRoDy;8uL)ThoSym zDzl*A7w&olajN4FtOR;_JN!>;UjYzDvaSmuK+psYo&bU1?!n#N2Dic8f{@1m=E`o&I<2SV;XTdq}RfMN8W2(6h9 z)D~harlpG!kBL0Q9IF9iu_s34yAly}H^UE;LiHo^^wTCe3Z~1Srq-8ON}^b9`C6L` z^TKaWngj;gV7L?BoF~)TS0`2PZ5^~^^j(jM^?yLm3CXFX`mAm)R4^%UQ9UBHXF47} zM=ILtapWz!!DfaNpZ;~rP;q@ts-Yoo`deR`aiI^BRUQnVf6+BVF20Di_hC+cfYn0T zsapTNIhT!M0CtE_*H_d~t~i>tS%4C%6?JjWA#x-zg2;`p(<&2bocZrH+hkd0N6DXH&i!JwXg5}(xSNj4T z>Q{}hPEZtg?2dFw;)JF~Uhp8>hmDTJl^qO_wWlKm^u4Ty%;4PJVNN6spyen|^YDT6 zZr{Eh%_nE;Y}&bq3nBWvh!3sE{UyM=NZeT9yTw-jvT}vSH-Jc{MLIW#+FS~dHa=Lpi$4 zL{Tt91)a?$kR>Lh_CjaD`Up=p16SWwp5alOTgZg+CaJ6nK7G2=Wu$WAx2La#WNMlY z$#Ez+wX;q(^nK2*Jt}PR9uuJ=`Xf7FySfL8N#LZuGKBW-R;y@#@ZQNuClz zLU(Qe=+QO1VFSfT~aQP}RoXq$8F!{^t zlriPtI7%jW+6N?z0FX{zLVDlOb6I2a<~qT7X&JTJ9h?J;iy3dbp|?o-tJ5~6K52|D zla^PL+xxk5*^TxURdmfBb_j8~UwEGsF-E$Zf1)@d?O5ulFS9b9^tKCliOm1@tWjBj zY`|WoDg)SP(PNNTgs&H%Bs!@28qtsZz0^60wn^!QCNd3GM67HJ;@r-OcR;tjSMuUs z*Nw5$qv`$W2$|Pn!qk`|1`DnYG>p>y%=MdAbXpH*fYKKg;w8iZ! zf&j=1Lw>)6#lfOH2iCF8h9oSLr~n2jP-!x0*>m{`ISEOL-6^HFg4G-hiiRk0JDk+2 zb}EpNIPlmGWH5ycBpVs{*n^pra`zEzq80#vb*z4vz&(bIXTw@&!j)jar_U~N*ksPs zh-{>O7bM-S@pWvWwlUhXbYB)*tm;=RK06T*vk}33xd6lm?U!oK?VANDkbZp|z%Nc; zo-^r@8ToEZnTLL<&dPbBnb|zaQ3l*yVI|PQS3BU4KcJdRStm~hI8(BEmf0!ik{?uq zoI4oh{Ba?sGqK;x;r(QA9--`7O8`f_7;K61nk3gzMif6LfdVIq>zlGyp9({Xd-y&= zGRf|MP~6f}Q!|x+&)I_gY*1_Qib`q8t%d&Z&Bo2u+-^Ovbc@#c>;75CnITTZISJEH z90^8XCh(Mo~i1Z#^c`AOV5`LhbtVRjt_ayfGdpsEVxm=MT z)cp?AI5GqN@DHp+6Jd}L=#oSnb2<@GQzSrur(tqI<+Qjvt%t@z*0w~g6I7N)nPOg- z5Vh0g;8!Uf)wguYLjFZ0(njv$sY7|GE)GbS5VKUnrCOYb6oZQ40J@;htfnF_(IC(i zWUUp)S;0-cih3t(;|Khx0&EejznfDv#g6FWh=nOlh6lGEXa%9PI(6e%56M(ml}9$x zQ!s0FTxSQPwt9UmM9C9hbR6Ru+ug1*ZNBg{UxFcTXqX5+WT@`CeOh-$8(~lgci~!C zD=}m?kQ>X!?+ZpIr<3E#Cac%m4g@Z|*OZ{9uN*zquu-$T4P$+^v&ZPC(&u~TEKR9u zAM5liM-$+{fu?(J>se<;${|x-%0Mu#i<{Yz&{M9ywHL$@V;K}KWDB&Y;ntp6R1Ft8 zQ0vxqUt&``$9v8;iaox-bjy$^(}}T&&zA%3D4rUYO?xk~s7Z#vWR2IAjNBhl)zTMX zsg;$|Ep8o5oN2b7B-{_m)kj3l|hbBCKyh3DPUV8oG*aH6hNf4vE1iVQqrT 
zg0)9I^#~Huvfp&vWQxEA^;|{tbqC0_YW9j3Z6|hOJF!EQ)v??~z7C3`gpwS!)?-o} zBmm=6;)`Cs!5}r7bLpWc{9+zjZk-j}E}`c^ID6dtZex-KgZL9Yc~#IP0qv1+1%US^ zkrMkO6LlV0g;D@T^1ufq!SC|1UK{9}=DzeXMbVsy5mgkExOeBMITM0YuKnZ^6Hpum zEa~vCXS1C=_q$_e9O&F;RS)1NgIuhG(g^T1VTo5?LL#R?${OgEstz)Pf6N57>_*Ac zJ?;#IGXB6uLh{5)?*uTFu=y%nvxU9K)0+^QgUM#tiCF za$4gi6+AJju2w-kr&YPlIw+kX6-<-$FNl-uArOpem!{{19w>GkL5Xi*L;8Ht2^~e| zshnPiI&`o_*t=S6iNE+ zJIyCKiWT3TGW*45X%04qRGres3#>~ozjElK&DtB33F$squt5{!0-K~p2mIk-NU*Pk z?0Lo*uICuZwAwt;d!&=&VoMjH*50q-x|Bn-!I_}nWaQDa+;A)K1aBv}vuV4L8wpsJ zFKj|2rC}w^3a)qL+ITudQHRdzRfwt6!*H`B2N%#m+`YFkVNg`3cEb_)P!wF(Krkw4 zXW`4p^pK8nah2~0_}HLr<{2F~R;>Jm-lQu88xC_yub@mY-p?+6sD+4`4 zSU(ew%WxP~3>Y?PPJba|&8vvX||i2+RxP?MWj(MsRk zvwu}|0Tl+G<8MqYQ3FvSCd@eal127zbpXM?Hks9y92PD)fdvu0wV}2SUSa>r@I4C! zW^h|4r!Us${cqzhSJ6IKNKxobMR;yON_0bha8z_bw1!1&*zH>LP3n>m)Vwy5&DBR? z5wWl>wdm(%8>F&&x;t)tr&F0IT?R>BQ+^1MQLf<9w3|r_RNft(X#kXET!YtP^gEaQ z8Ie9^#szL4%t_?V-fKj9M#N19T!DzM4#UCmV_qtf&ZBC|cSxspHpqtdZ?fJOd_s~g z;kT-y3O?ba^z^)Z(7!cyA1b7}z+R`?R(Z#ISeyTjo^)?V<&CYTtDJ?#xXMI~6>3s0 z556*_$*Q@)o17dTuYd;?Guptnb!&}>G~h}CMw-k)ABO?oD-TXcr0f#t`J|JK(VJ`v zxOYO?5ZrA|>a^ltj+hmYMqc8OEi0xbt9t8j52iyo^6NxzrG{T(1xvfooLAU#70Hsr z$Zw8MukqF%R149*K_{Bld;J|b6Q|^a`AkWIcAhxDOPXAJA%_vJrf+K1#?6O%qeI7W z!T0tn^4g$5d*KE*GYABHHT}kjTkYCw$f#LHY~wEN`0=hg!ukfu)#dh3-Dz;@f~TJH@#W?H za+1q|$h%#`yzdXEAxjSxJ1Gw{!)~J|A&t8j;1AyB51RW)6WdK!>#pR51FpC4%#fWq z?k{U6=JcCSmfks8-WxtFKOPkwH=Z5t8a*;4-Hk&&Z0s*PNrh5|r`$}v#^HRtjy~Fa z1pj%_)W^=HQJbgraAluXeg3a5nri!}Yx@Et8pPWqLA-~*Ky>gmelT3q2&Co>0!4Yl zN9@~Eqz_|aLFLWKIKj2x3<>!$1zf#lc63t6?uFT|!g^N!^s$p`L(+iEXNPKsMO%tQ zJ380dYNbV^ZDA{iBJqN0MDCu|^~GJ~j~2-d>bv=$BT6!sZpcXPrEfBkk6B%sk&hy7 zoKD1?!EAmPHe4<^4oer@JYIG==#0lyUb6}>+OAp)Nh^6_w@wqF zcY>AN{{6(6HDvn)><)x-1^n&}M@N7i0_7?}V_?h)v|4L{9m=#hT*)f^-7Ai^kP~eb zhwaxCHud7uyDW>TnARSqBm3tpU&TwtG0!2sa+!^Y)eAB15-z4ITL+kqn$(Nc>@s{6 zG8;=cC%o@CRPpgslniTaRxri8Jbodu)pR<;vvJu;dcSdX2Klq*E7%s{ZJ)Fbh4cFVwizsICR=hS_{(>{Ev#p_D&4N2g-o;inHy&x@F&?c289ie<# 
zgSQ5)tEJ4)65Kt%IVy&G-1>zZs@T5dnj^-Xu^yC^cRG+V!c4H!ugwh+3Kt30qzbl8 z8Tb&75pPH!=KH}Lq8K0aHBHRw*ohbqPbg~9r#xHlEHfdCW%e56E7vCfm1TYr-x44`KmOZAU zDJW!HuN>>9!fCV>L(zPU+}J{19sn_2taP3pf})J}v;C-Tt zuWyqpufeWDu3yreose7ia3UqA-$)XK>D$EOEtxGre*9v_iltnv-Bl`UjN5^e?@jIl zlA@LJk|30LH%!IDW&uHF1}Y|TJ-RT})EM<^y)mF}8D@6$ynz3E%-}k)jAY>=f#ys* zENrVW1fw|J3y@7=s%vL%RP=lO7y3$^rRJn1FXDpHyRhWyhw&CFQcOLA61O}fvx7th zU%&i(jsbczh|gsqTiX)J4PK_H=wfT-uoU+pVUjq*3YiLg2RgI*g6`0xP1`}KGn%&e z)cPb8?W~vih1XEt3HbYOWcQD6-&{qeXG}F_>mSUQ^LmUd^`1Zj239g)h0}@Q@2LXwsUT==-OId~^CSrhK-rf?U3fPB; z!SqHw4!Zb4M`#@9nl>SQNRNtLF$=;_7NUfssae|g>){oX@G~(nQl6iis;;godTeM~ zdOU|2HgmqdK799c=mRrUd(rw7_H1PTyP?n8;7>cfi3CViInLb4X|(-u^!9nVe8(V$Gt0St8IaSjFXhXZv=@T~}At%jJl6Mxy9g`S%Xu-5g- zshLC>6>x?gFP@pEJiI${9ezwX(mGqrNYT-Ax{Z+FxVnAkaycr%ako>U?)tD8!g2LN z^7s(vregXXq3iwOC|^n=W923GHl^$LR)OYgBB?tU>f^rgy)2H$mGR-`t1HIlyE}mt z=etgU6glmlvulANvy#YT`ADszTCp6vsm z{k%c|*64unQ#(=La9&nIJ|P+$QeGiGR(b#MK08sraM^^lQ|WTzG&gyj(Qwf@5*q_C zllTJWN`eq>OX6^5liswTzy#@#(`NhageYP#HdA#cA!T;5u?jZD#*QB0M7 z{Mlf&mp#x+%#t(!1K2!XZ7{?*M>kAvaGLL}xpW(Soe^ve$J=N(1JStp3XGU>7uT*TvR6@C7EfTtV%@%pqT%B7`H zGoqvX%Jq0+TcDp?v*YnzKzj(1^F?gX;m=1c+xL{v_PB9 zam6WBXY`uT^XgD-BO!yd0b*I24_Wr%eS%}^0sNtz@GeK@&W}VuW&i%B?%uCie9tD zQnjKr-8%GTlEDBHCB|N%N+MI~F*3@EN&|8J^jr(uajfKG4-G;#C7>8E2x4bzy*)Ecw&uD7B!$RHn0y- z3Ozy!KtiHTL(_o1KmYX3d9;)>R z5-=#x2$9(6*ysvE?~n7Fav>2v;0YN?%j<_B_6^fWNY8Ogs|a(}InjqrFvKuZi#K)L z?G|k8waM-me#0y(&(P6a%x$zEuLpX8$1N8T<;si^ab(mQXx_`tH!5|C`9n+d)m1gA zkJ3qOd3TD%Pm>!hrP>+Ma0J8NdyvXR=08_`ZEAQ~KxMSJ}ji;-E=a%>~A*C2wHP)}?qj+MsV*v0W{dSov@ zfn%hxL^*A5_J`XrW4>~jUWgBlA<`6ImFKFhjSZ&8at1F!b2&@;1nH7 zDphxD$BeUvg#)6FNP?L2l7tGypLA|II z--#MfggQ@Ft$Y`aje1KiuUJ^A2rQe^3hGn}ut9Ysdq8p|pHen1ozXAr)vD-(4TwXn zAk$TLYQ+kuLp>wgQYI^%F(~WTs_Eno;FBLj;Bt29)(Rj%6(NrXkk!oSSxm0QciIQQ zpmJ=dcajHyp+b_w1F))@v@9mpVmmDY)Xm%0GCPR_#!>aihLjR3YE3K_*SzIqc)x)% zl2ZUws%y1Id$TLV-X;_?I2-31vu^}cwhiChM0pbM>nSJ(9IpBN(ezWrESyA93)B@WzaV6nhUHz zH=}JW#>Km8$hQSQmyFe??bR(GL{7+K7&p$IA1IA8pcOf4Y%@+^X0S5ZnjNSCXjF{o 
zh7D>Z5R_9ON6(Q}08A^|bh8FY5+)gpjHl)bD~vqh*$WM)l{E_n84?Z|J{jB2v6k5z zo(*i(4LTD;dOz&11GC z(o5ym^PzxO#ja|*MI|ZD0&9!5MkNaY-Hzq` zDbPH2gKAdOzg^bH03IKkpP8N;zWfnR3hj`pOS7<@$>#(f35$@0%v^D&RIr#&v#ed) z=OerXb~5vVRqgS&mesU&e4ocQE1$RU3RuM~Dps{e=IyK5?c_f5L-X0*wbXZDpRtZu z5-phy&AV0$+nIgN;NN0Vu-qw7FssY8JFJ z_#DE2!m?#Ku$(#kwmiFDI<1+~?&1SB#)t*ajAhHTW?q?1#3e7_{@6e=qv%!%Zb&p|-nH!8tn5esZZgibfWktYXcKox(+w={XN|e0fKfrM zC$j{QeOK1>>z9rJQuM19HzZmyA6vF=ruP#7;K~*1RV?cEEC+!B{qiv;6!WUKwFi+^ z>81U^ntq%Z6be#R`MN^er#TZ-z$9g!%J&W2;il00stmQp*0USsv%r#m_LzGLNEP_L za0R9f%b88zeyNyl3JI0udR8m@qf_pD&eg1mkbY7C1CA9bPse$W+r1q) z(dmp70oM>v956Nrd)Uh*e&zv5w6(HU?Tl{1kijSSu68g!n>l*2n6NhS5LKK8wrnAI zR$aS+^Vm)5dF|kCZm&9=GuACfve0*%#5e(L(RO4PiiES$ibrK_+O^%ULc(w|Sos|T zPtf+MhYFZ2b2Vz)>_VV$kk|hvs#LRfs&_F)(LKdtQucISt(;o-rR=wQi>`=z58;8#ztg6rKT&D_Zq3 zh6z&!*_A(hJSAMvOBl9I!Dj#DY>~ty>kQum{hfKkYG&JaSSm%E zz1vy52T_t~*Q#&(%P>!h0DFjY@)hfa{nnYvlzXG%Y2BLDNdL>lHTp>7tS>Nkz~T$X}moKF*OFn9Mrv0t!2SVQk%jS!?7we+~< zKxe$*hYW;5dq>5Ps?7P_4F{AB%mJKLW(RxZV=76i8E3QG1E}MRONc^fMeGS=X4WS+ z(lBYMce?<^e8B*L^=bySlubjRh1nkPrj%I2swLX32=w;FCFBA03^o<+N@H7xn-FLO zSoTX%xL8ywHVyuk0XIPFJdqC!bOqSf%K^C9k0Oj3TrF*Ge4qhf(JxgZP2ZN1Y1p^; zyG4Og0?{EGq3y805w3)D--g2DV)L+gS;Oxlj?rf-3w{s9!Hq$2@IHs$#UA^(q11BW zh6&0DW(vLlZy1c#)QaNu3G_4A5%|JuyEoscR-{_i-9AdgfGVIQgR{Q2!|i{sBiu6) zYJj`WL+B|ATbDsaoJo>tCfFSMfD6VA;SCFojz#mKkl3=efQn8>vFBST0xmv}u2 zIyMcVj&Tp0mj&)PkFMtcEHj!p(TaIXm|F&D_XiH>ZZPqeiLgv4<}53sEr2}yyht|~ zvn(teB9%x2Y;)!n{{@q}h0s}CEAB1tfe1RHf!BaHWK*w&=ohc+NGEYS z5vf_UL_21DEf78efxNJOXc*LFT6P`2zF`RK_=LQoflrYuFVhZ=vavaKL=8rO__PQcptX>m{DbSvH%-(mzk$tAeJ#`DR)fyW+C+BMGF-ADx#NBta-QH_ra8r$Q-_ytKY{{YF~3ScEt-W;cupzbk`9rvj4&jTe&?`p(MElC zwEvaG03rg1CxCEGfD#GMWu=_EBLt8=L4&}d2*6wGCI+fdtZjdcz;iJymyXOTe|$%J zBQDp#a)&;TV%rnIwmSjcfO1K65S2|9lW)!mib_fr7Q+VJj~q=}D5;oNDy&(xphjR@ z_$VQ8qs>0@EyQ&2DRn0>7p&)_X$d5<1>Y2eA)$F z3_*t&w{(|AtaA3*;jLD?#w3(k_IZuc_C=@sSzq}fgMu-tb~8r-83FtXL8cgs^bY#x z6FXU>$_2!DJ((f=QC+4%Y#5I8H-_iqJ0;Yd+NoX5I-y+}LAV$^^xg)r(`aeb(%O+- 
z`ayVIf?(5VIn-*}?p?e=LKqSB+4^MDOZ*6xT3%hkL2?)c^i%qUleK9UGVA4CC_z&y zleO6vs_V&JU_mk%IrKyNiIYq@quys2Z|NuuRmN(wER@%SyHta~Fd*sYECts~yAXmh zFuLi2jS@$g@+{QX<=_yx;5!?q>@r5xMtDYKMz}_#Mufhptb6f8QPMX>UXbk&?oim} zo(Qg&bs^;;1xY6hA@ocU?~v>eD3ZmJ#p1=1#1b0%zv_#nh{Z+2M~UB&3Vmb%|*>s%(*3dgj$JP4O-D#QCsC(8Cq>y;ai6=@ZC`l z5D!of5DridkPhBD5!(^ik=x>{?IlrjN_zB#3`b!eKccmC1<_H%mfi z8MpS{gB?dNrsdYld|>cjkwDL)Rnl`G;!P4_8Mh(a7afhur{&+C51d5Lp;gNX5szvF zAKA?rSJ!hJ;!HZG64WaldXt3B^2x?_*LZX`zd}r}Vd(R0PKAPA+7Mn+CyR)6^d8wb zptyp&gc+}AQ(~s5LO?Hjh%9NA#mahXZ(uyGq=Iuhp>P6_RiUI8G^CVd!?JI~uwy(i zn|TJGV3WMxIAWi7#-H=GRV+!FrN-uHho0}{@>=l_TheX$1l;95)(CU1mEd;i5JJ*t z=6oyB?b;!zqzINAYw`WWuyU#^>1|rI-Nq^V)HBKLup#ZFk1P^4$-9ZpqEp64vXeIO*giwa9#xSOxG5yAP zKs94f+OJX5isSJW;uba-Gm(nPpfL!~E=2|+2d)@Hg}PS5d~_wS72sh7u?_bSEdmh( zJWPP4MK`5s)5+{5iWnu-7Io=Vi~tZ~i?PI6Gwf=OO=czteU88(bd`{J8cg{rMwlQ< z9cM^0xDnrKryGCQfna#Z*5nl*BL?IGj zspCv(+BYa;4ax^~s(Y~_>Ilz7w-U&x0Q%)U8qdI->slSl*$w|*IlwL9w3un^0$m-@a^AR1#D@Lu+@q^= z@|eKo!qK8+*%c+y`u{<8ZpP^TrPCsUO^ok#pq$JAl35d zdlR(u`jX9hseaV}k! z+z9ycF)mw{o@<_OOVeD7=b}%=)PazZO@{KhM8qWbiVSb$=$$qv_ZB5YpikywZviKZ zk|JUfk|LBT3%KNy<^Xko3-9KXloW*=Q(N5(2XN*KOoo9Hg%PR}fXml|M(?O$`T`e> zr}FhZn@ni3!BZJ|kA1f~vh0FQC%XNwBD_uadlBEJ`@INl)BRq=w&{K?_beYN2a07# zGi*(DZHwGl08*iGDKY4}x;h(tRc(QH3h#6`McAz{H&ApKixW~JqUKXI7YFL=0N}IC zHV+Qjn)D*BJQe4Rbicfqjj|?<|5jvThlm5mxLE2+nXog5N$$8cZFDZ9XZRwe~gLGATgazTnZzHC`cx<%OL89azQV?Ums^*MVp>T7>7Q4akj5B z**D}zW2mPG6p&gRU_rkyLYaWh;&p#tAdjqD&3saDx8Pk@2e=;G*PS*RW1?iNaJfG? 
zsK2m0E8pv>IqPKPy`etQN>aXEAGOTu-#OrgZi1PZ+y=F&1M}ackcoy)_Jkp*K`PdwXYl(Iid)3K=0lv@BMax_#|U06tl8nIve@ZlQ#p zGt2%sAZ6&rdMc-aJ?Y|q>{8uDf=MQxgize(^aC%z$t{z_-x-jTECFWsHLiB!#AcM= z+y~x?M-Vf*_-$At#%jJ`+3bkuuWvaePnFXcMU^Bi<%ao?(n3PE0{X>9?mZ~DJR_*W zt3uWnLPU}k;7)jw>n{y zL)E}8#o?3>2x=kJpmhWBC2uKeF#(}bH3l<65at7wTjuz?+ z54LPt^y*9rwl8S-ba?;zh_8l_Hd|$ku0DetgiUmysI@Zb{33z9%i`G7-hIXVchh;=3j2s8*K;hPfQ8kinbWeP z{Go4StEOg7c7qSI*r(BXgRT7=x+-&Qw^gdVIkWBgcbPb)M~RnO31+}vs0LJ^<_*su`ncc`xQfpcSSKZkZFur3YOQ_i}X@{>XU@6t%hr*mMm(K3rp|X;;|pgz6`2 z>|v|Mo%I00&i6KRLQm^IC#OMlYI=VVZ3BLPJo8L-u1tX+NwN+-O6Rd=T;%yaw)*DU zw1KK^05f^J;mi7r7&2GIQD?|k3C#^33(tjPB`9_SWbmSe5HR)Sl{;!i3g6`%ZjLLG z6%4a>I=gERj>DQtAydL!oaCN+u*Y_=$ZH&BV_|41Tp~sAniV$>_t)qvJ!GCtmm|U> zebD@m!m3F^=8L@jq%%z0DAg+0zR9(kbi4eE?w|J-YM=s~ZKMF2(9C*e}o+=&b zk3n^p_w?LHkHa@Q%&SHQeh{N&P&V!FKal^%(a2Rj)HP$Hh=dBEsf|r;5SKQGGlaq9 zA7v7(wF`HB>Q!(P%T#faR&YMnHxMkwGn~km#(}16!DdBcP!gu7#`e8lQ^(p}e8F3_ zF2d1o(H6gA(~JK|i~5!JnX6rpvFg_mNqjS+{&+9sQ2FapZhqA{2G0zPp{_nKLZ#|c zZZHKOXv|?JafPXu7o`Xod6*1JY9FyoD8#$t$MSKawwaCw*@uh2hF2dZ(!RFgN0_Uev#C7W_CtBrS2+m%ZOf8SQG7>|*#$ zT<@N|UviIKm4McB16AkVq^Cn!Ph?L8z~+>f>w`tAAn(5xmUHdy)RX3{fPDO*;^4>@ z4EZ3NDMD0Ik%hlDDbVj~1;-3%GHbo2-CXOSTBT^Evr6c1p!j&SG58`6Nm0DX` zTFzGw&SzYSb?d~fs;scGVVc)ZiN(YRMd;~$pW>jQwk-@iD2X;u1NrF*BCR#Ef$K$3 zXO{S=TitiCuz|3-!m)!#)S3uY_1=7uUcgfdU2h@Lh&Z$v#(3&wdR?vf>V$r2j6Th~ z+mwE8#4dY>*nX@b=J_EY#I`Z>AdsZDX)f@ZC6rc=A;#~+t%I-VnI46<5Pxp{!C@kQ z`0Rm5>z?~tD-2-@Z_C14X1EXbb;xe{XmP=2x{Q%N%zBHTDZl`^BwNeK}NlL{k9J4Lzb8@JnaHp z4;p0}Wp-s~Ob5w9W34V!_muC{w6%4#Oy6kB%MRkY2MC7C!#%<_drWP?Y_b>m1Ieb< zD`ksDP3kn9atwtXmVJS*ROKC;?DiwWbuG}w< zODOKkmy|DX=r4+H_6Hlsd{6Q}(Dr}VRRgD%4O%$R8;om?rz)!!+kR0u%bK4WWPyJM zwUtHoT@kItEpN-MYvV1fq{jKs2Kx_2MHC(;u-UI#5CH-L(*0C-3X8lnTWbE=CoZFxlA_cMV0O?S#M2H0B9+5Cv@4&D& z0}b7lS3pd(Znm!%p*%81j6yV|oIFc|Uc+@3>K3LQ3(iRFojWLW?Za&y*e?Qsexe2f zg8d|rU*7!x4bMLj$Y)CfYaoTOnYPg{@_>Gl#dH0GJf3fq`xX3I8$W`x{ylJOOHETN zV>3O5@QN&qhI6T|6;k?8X#aM z5HD+?|HNRJ{tg#)D{D<_;II06wvd10HUB#MXA@<|JHn%Rawy%W%KZ-zf59cErm(X9 
zGYkF2W0h3G6~p^LKm#S?dxawcE)*1W)r1Ybio=rAl2=Qr%O}#;2JRa1$u;1HuEF!{ z4?FV+2;%H_JDUl=MC}p0%_58jLFvo^iMm3IkHtVsz>GtWPe@P{^~voa9TXEL_aBxB zpcE$W=@k_xv!#mlr;tkwQ;d{8(3pV=BO)IZg}V4A{u=Jv5Y%hf0&9jK5*n%yK~gC5 z#1~SaHkcg`6UdDCU_4+vBK^yUb$;OB?;#(q`kCfC2YocD(S>`9d`_q z&PRc_OuOUTT0V%wvfXkKym|x4^xF14hFhVv02xA&&SAgT21;$I9rU|*y=O~D>U2ip)r3tbnyRn=wJJtzXI^b$c_CI!1|v9P(AtHlX#&1T+)Ax&mVq* zV0rSzKkYkz_wpyCf9v7TI(?33uRxxW;9Dy%wVsYy`&21^M*;!K{1W>Yk@Sqsv^0&$ z>3(tVKLh>ueILQ^pwEV+4fY>S@3++8uSoKb%A)=Ws_m~r{g{RP0{d^f*MGUnA7N>K zf+c^Nn)9a${Ot^Xg#G`P^M3?p`3X4w?*ad4PyZ|aulDqO;;4QcbJ|bV<@8i}f5(5e zCwTC`b>jaY|JOc*>Ng|%Uzz;-9EbULCeKE8{P&prqL5!*_&+$-$0tDmGe@^cx;jrC>653S|8IfhS9LtIhz{Jw(|uBf!BhR214)2FE!e~m>$i+VKU6N^v(Ll%}U|4aTwx8na`f$#c{S^Q;L1XD#ZK8Jm*7a$-UKUwF-KV@NM@K;5l@V;jM zkp~1M)$)0u;kSI^*+rVa{4-Igi`r-dO@Z`O>QoduX4-$1(9f%U{YZ$X>%U0ouN%wH zvv_|bbUyXpB=nc<=I6Wvc04bEluDeEmphck#bR=$8p#e~r-3qh~)7 zq67u~M-lf&qqa79x`zDEgq}0JpGRJPB;*P94+#A<4)et1f9(?gecH$KlfOs3{0B^a z8~}MD@_$SXSbrz-oEjj4|1BcFjV=6#?BHL?$ v$185a-=*-}LvFqL4?KRj!hgd0x0mGLPl+f9$or??rPm-J{7_H%0LcFXtQv6k literal 0 HcmV?d00001 diff --git a/core/src/test/resources/indices/bwc/repo-2.0.2.zip b/core/src/test/resources/indices/bwc/repo-2.0.2.zip new file mode 100644 index 0000000000000000000000000000000000000000..696ffd939d59093e61da55a96898e4431639daf0 GIT binary patch literal 81369 zcmd43byQrHs3F^;ZUqa%?e<=Uiz(Wy3xd5FUSv1t& zLc#i;xM=>l+`W*Ypy6(zp`hS!em|)EKR#}&8xw*5d-TtR^j{yhb}$Ee{VN;36s`^? zPOestZmtykdK7LzS2rUz3IqTDwhj4bZ>^SdYT`y&0f2#5=0Z3PYPzC%&ISnB$P^pDK` zw>#1N-%;j%C%ppZZ@9zuZ{0af2IXdvs3vC+Ch7k62b`9q3+e6c{h~ypV?`OW5Z_0n zR?x-K)ZgYO3sSpQRY2WFJq=z-Uk?!v8r8 zMm)*4Iu1lQu;ZHqA{~q$?XxGE8#xa85e-{+j{})ThP&5dAKKZ}emW>pj@V%wl(4B? 
zm@0~o44leA*uSl3AtZI`BZbb>SN3Q=S|=B0CV_IjI1?Oq zS;0j)Ry=SDZOVX#LV!I>3_E_eEzVcA!Q0gs5jhPUnGi2{AvUY56`R#v9 z6{jaP3z)x56|)#M6}{h61sm>fOcnNjHC5a}GP3=~P*T_m1IGmnAxH+1kvO>J;>~dj zCD+M{;DPWkLMIiR`gPwoU6Jn+av*V}kYc%ftnS?mIBXO@pBA`5Gbprm zi>sM&Z&zGA;$P1ratn2sR%n;uIiguF&~uA%n6YnHXg%Uy&lhlu_%W^YH0heI5YUr5 z%<6H+;BV+ z_+!og=h5lY_R~y1sP(;2cemHW*3I)t{J}ZP`L@gR7f+9;=Wjbb=K+D~9M-l8NlnS<$2_jz7K;NAe>H5sRk47=8QWXho;u zj5U7k7{%_R9Tsz$+&74rD0@4`Pxp!0x6$M}kFcrwJB8<{W}@#K-km{K$j=R4HrBF{1XSylx~|G`g*>qVuXg3`9ef~1Y~&f;S$ktJrP z+>IhfI_$2$<w~cQS`OZj~Gr-~s<=wJI%s3hrEp1t81UDt#ZXlL< zxDtzqs?G7dl&4=E+@x%Bvfbt>({N0B_M35T>SWp5XRiB_X%M@nQ`cv}k?kTIt zEahbI-&Tmu#r9jgEdr{>w7>nTPLintrimz4IF|P;twnuwT;dvo!i_uN%70hy4x|$L>sbj7^oSpTdqVh6>yfr zNUD^RVe-R@z9}S)UE%kAkyG14{&7mUd#UOezT-p=+1V-$I@=$Ct>#5Vt*c>mV4$Z>SrF1uqX0IrOWRyFdWK>gM6>ffLvR6J- zJ|P{{FA5v#y*($=aOoY@Rz8r$IShQmNz-E-6k^cfXA1p4_oflbN43^JvHT4w{w)yZ z{yH0l;c!TBrX&k_pAbAe1xG;#} z+Bn3M3`dSz+V^c(|1nxhXWx5tSFz4uLs=apiF5@*hWF7FMhQkJDJ(&w9}A8mbVog~ z#5j=hhSSW&ZtK5?jaCL_p*JV*u*@sls*7KP$osPJlpl_)K8Z2!38LqN+t94!1E4j= zE?Bs(>HjjdJQ?oWAn;I7E9ieC96bLj91~gwk$RFiIbZ=92M=tX$GIPYJQl^)pKc3{@jmq#Cfo|OHk-JqzDx^d z7PAns$a{c%r@sKf^H!};mN%EnY=dlKy;HR+&wJF5vYHR-EjAi9)5Wy;g~kg?spE<_ zp3-u$gL#Fzl{jOiK_pQ_l(iANAt~=QO-GW0E#A2n9}^R*B=gPJ647O-q7>QOUm)3V zV35w_p9dj~mlaHrbH&jfD6i%sqSAcM58h>)#}@wd=>``EU#85JRaH;aSM3NFN5d?_ zX%$I~^0Fy7+W=`P{S0ia7jU_e@S;;TpBlGj6Tvvc`N-IB3=+;b++xp43 ztQ&%eoQ}|%xLbF59Q~WVQh6edwAd=7y|aJj>ClKlCoVA@$+Wv7*+`((^WBS$G7&F` zK_RplNTohyhi5pjDU9Jx9B`TVq;V1xGeAvTxa?Ce;vrI(<-c5|9;2xfV$U6Rj@d}~ zxuN-{WQWkFQMwS$H_XZ>*XMgJ!s?l$9_n-4;ho^HO$$HYi01qw8RN0LOEJ2P00vx} zs>}v7B>0qK$6~B)&SIelR%Kt2t5S=?sA?=7Rc?bZv5mbX!W5k%u4sqTAeAqP_ttp@ za^#>G!I!;z16S%j3gInIZ*L$x$Gu9Fo+Mxy*ieWzH~;PxrX=I!lRmc$j37NY9~vUz zKUdE0jSV|QDCaPBufHEJ!KpFq;O;+N;Zte2VgabjyF$WNP1^NY)28wG)_^M93K=+^kQnnXE88nXDm}nXF>(GTCZ)Ol}Mu`tWV4@(BibF5cWDy^y}rptuj& z7an@9FD{)zEZo)AZ0q$1HSx{fT(CKhp?uCpW(A*}0QO3NS-!_TmX?aktv`NIj^SK4 zKl0YURLi01ebtofC}G(d0vKb(gS;}nx)UybG?P$?EZBjQDdQ~f^<|e#BGljts!&u# 
zlCh>r86;c?lG8Df<}#MGtI45i7mA+wg)U0uyOuj1HDnS`$}0kA0!NLGQ-RW-h_zk+ z==by8s*=qeYa;g`H(9o$i2!4?%hz&|Q=5F}##PU{$Cod@dza>CX_D8%ED5`5d7}gD zZDFfZTT%mzK{O(|P}#6tG$Q&?l0jjy-nK@~&ru@n1;e48!v~i;^qnGfDVl5x-Ko0$ zUY*$jNc~+qp3Yyx?>?PxKi6D7iUaI;wLcG7rNcVL!Y*(228M;=v6bv$LMNek4Z&)A z3|A>gr-|c45IYYyiGLRJBE*WW`xx>8t#OFSm)J7?A)w5g#MX`qjxiU4DemEZti^j_ zZ)A1%9{(qUsho>RF0$vB3x{{49$PZ+gtq#;`3B?8+-qQMg3zkBdIHk3}(HDj!V`<{fr@&Xw@M#}MG|q<|@7@&pm7D#gpEvl`qzj@Z9q zE;|SQ7&30pgw`1xcU_Jj-nGNn;PI)=$P3d)fsSa)YR%P04+oxl@ILwpR`0sx_FHK$ zY}BLgqCp(&HB_yIfVl$A&-Z5nb9eVfaGpX$?zOLyTM;9yw_g4YJ;huYer3$_8#Bwz zww|6!jz+J&n>pmUZ=5?`jc~?k$)k*;c;CMm*~KLO$PPF}&++UrGU&Q_2>hS>6u-*f*cf>K)y6QN`6hv6F(s$xM|UHDA+Njj_nnWB@Ba4_cN;H=%HMFivYVCtj+VdY;$O|3 zKeaEQXP>{koOuR5IyBr=&(@AsB5^qOEWY%|Z#}o0lP*-PFwYf??dBjs!$GRL~G$RY9lY6G288w|Deh0j?QjBbn;G~t*Vb2(o z)bZxjifo9ss=)!K%;-~(R;;Ab=+Z57QpTuWJupB|q$F_f&ghO524}4*r=^B9mGWa=Dia1l8xNF0i&z_0(xv z%cy)y8>NWG)^!_}D3_Knry7-@prFLWj*5(x(5Iqc$9kJ6y+(3= zizO+h!-SuYn@EKT)bA{+d1PZ6NA>pMiWLaIF*_P4SK99soSI*6pK_xjpHO9RF@mPY@milpBO*nXmrh!?n-Yqt$yi|DxKV|2kIxYuTRYVDWRSN9^cqAL|B1 z!594)Gz~+BQA+mFF$r%8DR24YkLJlkl7CdOKPv4%)HXUL11<7DPciZ4l8#fc&S|mb_HkPntqqro@|@QtuBE(3G~J&{BySr*%rOmE6GR88T(*YEb}Fr! 
zlGZy}=9S#2-6|WbJesOJWp)}obj;RSYPgKUHVb*Y9i5UY)eSW8COR@PA({i>%{$D2 zO!PJL1>)+d(W8;EqwFdKi7EuxDVPK)&VN)e#vfJYkE#P;pk!xYlmJRd0gKDzx74FX zBexk8KHF%NmqjaAj!I~R>EdE*VA^Qtt4o$?yj8)?*QA#GJ*WYwpqQ^f`qS5lu0U+A zkX4t&X;nTRMv7glSB&0oxmu8z-O!y~)QmbDPdtZZP_;;6RLE;_TTp%h*psO&q%jOM zsZn*g)UcctM16f4iM@v;ji%n7By&4x&bK%}^Pgq#j6JHZe)2xc;AE6gXg} zt_V=n0cC9x;oiEeaM>h3S^~qH$5wYEjI7gGP>TXmIg?L$Ms@MDQn(qc7V?13F~26R z$7CBi7`Ll)dtSRAzD1k`zCu>dj~)GQlzM!%I&UBEHtIKiMR*F|Uv&NK!R@j7{^Ea^ z{j=wF;PHI430(2F{yXb2wOAYc7-ir=6!EHoqi(3jg<`MqkF1Px5g&1dQ|znebd z48o?NRjA2aWA#qUq-m9>(UY%FH)fkLO_EfKF~sX)4v={t(~bAc^h*z=4&RDx*tBi{ zBW7ryhnqoGpPOO1(A+mETZWr@Io_Nx$v~Q$Y}q$C?8d=*b+*1(M=m!Nk9l2(u<^#+ zdPr8ay^hoPsB6}vct^G|MTn7Ev%Qk@+b1Kh>0GUz2gaOqL9U0TUGRu2NJ-u>y_54{ zewPDebUj)WD)})+5igs$UE8hs&|+r2AY#B5r5WFXZOt@nKsrVRuaJ3AJGL3$^7%{Q zjr39cEMYODta!2^oxL)D9;fh;@9b4F(vjQjZc&enEvSYDRlx_`#CBvn+nFp&A5iW~ z?_KUe?^frpdGDstSPHmp@F1fqduy}(0Kb!_~P|;PNqEIT9{*F zJAFVi#vX5#*+cuZiPy?`c)M=EG2Mo3!nAz=B?gSw$Yh~6UY~BuHnr`)ug6rUZB;*P zT0ejlQ;avpl&V*iuts0iqz$ZJF^wAl#H{1_GX2yguIIH`z`*TPd$vGZ$Ni~u+RAHO z|IIXez$XS3kBFID$FAjGphY{j6>Q8gW05prp1Hy8Z})5ErtyLK*&bmDrHR0tV`*P4 zW{SyB$F1(jY7vK^sT8DOnF-)Fuy0>>YdtcVn=471aE|%z+_zmcKp!)QXUR0Fo7Gs> z!t3Y^**w)hX7wN@|V%jxYzPOcbQJSAqK7B7p-?_Lg^X|u$3QY*0y1op1I`*{=Z zdCgsy_9G`uGpV`d>{FMST6IhsXKTnA61vFJUMCqc7r1ThzO9_rEn7E^S+q`&W_EGE zhWc|q+5?ud8p~Vw92+Msx+V}Z@AkZTfk-V(6J(LRv)@cEfu_wDywsQP3~SaR3e z@$>38wVAsv??+E)WCreLOt@zfar4+aEU&e+QQn%lPVIM32w(Q@=TA^(4sk2l<*fMG zU61d#HK~<;D%)H?<~g}C0-v%Dm{jz0Mo6N?kl^u;yQd$pY74o9lcR}~eBwj##NT3_ zHA(KbieN)C1B%kcy5gU+LQD$!{UYGe=144f$KBHR<`GGTTNXu4TKb72e9`Vm6nV4U zc=w!EwqpBrB0eJ>5R>u>y76v0O>ZUin?z8eeI)rTgGxv2S?0le?j-sxte-k!3{8u; zhF{WR-^ zj2-h_geA*|FkQx%X+^fMgKyzu*4FXnv^N$FvcEntn-ptY-Q? 
zdoM6=j4TZT5ZC|Ij^auH@OC07WSs>jkGp}-*oXE$j~S$$0R;3t+G`7m2N_OXmd_f7 zfVE>nY0rRGeTNQWCqB9pv8{wLlQc@eM}yC8`mSw1T>AHZzmU`6LP|5J`J(R*K5ys+ z=8sXN4FKfyQ%5sYwf7s=fZ=1ZX&Qh^gKuA|+;t8bx)DambEA-m^ts#ZT$WE827r}g z+-W-iT7AXQZ7tqCmzBNPF`YD5zyjdF0PscM?Ll5vJil;=GHnQ;q@UC7cX~Cx*EYtO zrnHtp5h0|C24FD|azEO1p4#gkgH4OL;`3&@JFJ`@DQx`sF206zU9jEv=Hl>ZW~{LF zqj0zoy8;5~^9KYklpFkAwk5O3A?0)TWbwdo6Vm<?NkiuG^rUJZO1$OMi0Q`fv^VRg%@T)Hz0`^{=##>PIX2T3w>~yXic=lkoejOxb{f^Vx^^n)l3kaHnbr7(YR(C)na+cWo0*{E6P~ZfUE2&Ma-n zA^roY?}uOaH@9uuY#+BOhPdKSNO=SueAceMoH-Be)Fj9I6(D8%x8J+nww+)0?&RM< z#}APz3FdUCx}=w0)DLmQAE@35b@}-``fXqK?Gz7{l3wEqfe<$5CQ3V$&~vp&N&NVK zxm|&e8bwS}r|fe1gw*^?9^~)9`I^;1iB1%WkE>TxcO!e}o5=)H~)LfqRr&f_;uP%djcwTos{0|CV2NpwcAZ zeq+C7=@cM$UC7t(=daW2<#X5GT6&lCBB$wt1f}LF?_6Y|cm99}{hvDfjcb+-eMq?} zLY01+k5!jCCykSqT~i3TaY@wS3PPFwE%$3zd`GUM2TfB1xs5^xPm3yOYPb%M#6S7A zUFQ$dr<`-Kg?RiNWI+jg%nE%68B^{$@X(>KpWytCufg--N)bUvS978wr9qR>1~Bai zZpeqE(=2&`y+J_)4bbp6XouL-Je!}V403&ERs2Zr1*=3|=uAucD07E|l>Bmj`JG;m zAGC2I=DrhR@wcGAK|CawW;8bL%?o;fMux#dU_#O+X4UhU3o3>lgGqf;g{VzX$6)N< zn-SD3s*u>P`cTfY>DK!*hy+>y)&u?&Z3(-M$2hk$Fx(7!2v!j;3k9E)mBQG(w=8G| z+HCg-XAmJ48UH;ixv^<)QP2pq8cYd-JhI_OzAEK(03)}tMK99>E9^epIBNPkCqSOA zkbF1?v?c7%H^hj%_)c`2PQ8gih0rIvg__#$>&T7GdP{;Pp!Hx{5bTh>aHq-5o9E@E zOA{u^4a6J}){wq1yJI@>Y#J)N?kdM~A>F}}4%?u6<6qEiI`@KtN}=Ds5d;l^jEcqE z!`;x22wa3V#d-&Vb(w9f-!{aOs2uQ(1}az*DOCtC?+2h z(+p!m)4^hAH1`29ezrHCr1{? 
z3%@}-!k*sVnSf1BE3O!xg~o^N!fg(E`hlU!9FEbzZl0pE??X7knwD4I zh4L1-&(+&0d>zRb=O@Fdn$>VXIP4WiULSdQ|CP*Wt#iWwJ+T&VzJ$=t>mi3x(U1Rx zw#$*A1~$g5RDY>0?4ul{MVuPOHRUhm7atr(lbv9qQPBer@ma&pi@m=LXO-T z>*%b#7dgtAa>T%)Zc#C=3a)1V2&@@3VpjrokHVycGsLLlRi3IX*Sd%RqefeGDjMX0 zZKFsjDGZhBnw3>5I+ZTU)tIR2LNkH=p4U)jhQSOv8hSyXQ26T1O z3PIJjYL}0DHKQL=9vR{_y8E$HY#A0c9Lml}4Wq0v`vD|35)-wnl7fNW$`OT)b5OJkW(X49_Auf(Elk=z5>B15%vgl5@l%AoDUxl;W zPRk%kTnFJD=VvS81>TaG&pU*)oQgJCv%J;LQajOu8gaISi(H$Z1{LC_I1Ob;5c!dE ztOOVCwRdz-u|$a<>Lqrn2KnM_;;sqbaRO}gXTLBtGbS+WO-oK|I2r8(4~oZqB#h@O zLAF3NKSCTtra`1ZqIvrZ837Ri3E?e5Oa=uE1OuWBVTCwB5FyeKI*1hnAEF&RDGyUNZzzH1;h%uxx#5Uw4geW94 zL@xvsq82g-6T=bG7~&dY1M!ByLnI-Z5N?P^@B|t}5dwfXKu95m5Ftna1R5e5+)oZM zfnY*ZAZ!q4$Xkd#gal%s;wq8QyO*<>xLMES0)d5yL-Zkn5I+b^@Q@gU0%8imf~Z2+ zAubRkh%AI5c*+Js2+@V`Lwq0z5Ge=^!~&8^NEs3Y%@6C2aD?)Wpprw;vS|`O`;x!Q zP0Z)yX6C4JPN}qKQVlW`Vi|G}q6F9MMeap^uYLjEOvwwK#K_L)?|1uj@;iYxb1J1E zMaygezkz%Ek=x=?{+ya+m?9pfyVMcWyiQT0wq?|$RCY1{m|NFw{u6iZ!|?@hDTk3| z@FZq-A^(uuOY{yut()R;)+}GOtJFc&Bwscozm|K&ml9WlgHMwR*|hu%{0HuUL;d-- zqD34`zo@Z>I(O1T!TGjgSG|MJlLpzo{CDmfn~9TV*%SPFZY{@lbIUc2;+Cb8fNYjV zJ!)$;4M|ZSC;(5LzS8dCnOaAChMifqb^9%4KV>TKB2`WE<8~45T0eCX^QvU!u z`yD@vyHJWM+o8?$L|(^f41!T{Da1IG9HAfe6Cn!6A#i#%FJ9Crw034HuLpt^T8uD; zl1fy?s%_2s-8lGnN|h%Ub*$=!jq4$Zp@|5EC{jeRZ1}dUlbesRJn6=)GsZ~}rqE>s zSCoCiaSmKH{iY5`4B;0FoCc+|04bdqNud&ngW&*?{ zloH`1YCtbr=neuciXveayS8QBhH)rFDpUdCHE{w#52c0Bj$_H8uHU#60tj74@HLdf ze4_nHc*^kw=mgm;fKZ2yA(Wy{;%9N)lOLc|piUC4v3@aiTFA?l2?RmYh|32^ID&*| z{1p%~P}m9VX4i`%WRyULG{XBW5R%XiggcZUM1rhdRu{vY^$-vtRR~f*pBlY?s3pP_ zs!(Ep@$5)Zqs+VCZzqI{Ge?j^(-3lH7g@c4(-8BX5krtaP2hww$b_a}K`K3#lK{xR zVjer94RYq%7v^|I-BF-Px8q(mZCEhR9Pt2=&_Uvw>~dr_8%YU_jwC zG4gnsodZ~)Vwy4e)XXYY9qWcs^X3skP}KejT}Uq5hZ4yc@FPQD~_oE2=%K4#uB zGNohNFlpX3f&hx6$&@DruW__FUiI$26XfIrx~%NRj_80~X%=V>6aeY^oZgvoG--Q0 z=%7lPMR|wJHMTZem)_mHWz%@wgWs>VcmN7HMGPvaJqg?1P=8q+VAO&qJM=tNwwDKX%p|LP{4{R%osQjP925= zQ;&_n8PXuKi&o1k3t*cALr&$DX|Z?BL)PS#d9Zh%+3r^kL<20Sg&S29bm)hxa4a;D 
zI2a|tDFx%QUB6{Y3n}EZYOQaLzwN=q(9m$bj*drwwx`a+Su?xN>6_9C4(=`w(V??g94*W0hZx&Mk-bEyv*iIAwcWHuSe3fH&iNek~t}_RA~&Qwrr3 z|0z52ivJXlyy8DahNhGXq?R;7{~%aOSLuu)2gXio##+}ddd$UZjzmq^|TV; z(KRMKR+(v#Y$RN3sNxV{cox$B5Dq^u;C_XS9CK~+>egbyHE}x4*(g3Yj;6rjIl}~+ z`=OsAhL`UNmS_I(VmM7A^Q(IDa2seu+M_v>D@Nz4VN3Br=dzY9p0lCoZFO ze4uu`fy(@>DAyw=P)kl&R66xe&C|YUgmadd;HZ%32%F%D8KqFuN46Bd_=rB++K=Uf ze}L%m*H7T?NqHm7l3I?rMexzd_>H^8FMbYpgJ)s;yhItC_ja$e3QeDzqI!;LuV$;M zS=LRjW(}xV*5R&Z(K6CfeR4E1av0|37H8*L=jQHa=i+DQre^1A=jP65)sDUQmye!L zZvs4ggax{Cvp1#`&n-*u8s{GW_u<=}SS)V`A0@4#(wlEJX*sH%%2%aNoRs6q7a8mK z&$+DMLZ;x`zDMG`TP>H^D;hzX+s#qgk~A;?t?Pk0Y#JYTt-y!N@}N5*CBX*0 z4p5M(pHRNR{!mnAjlIyDWIp>+w2gQ zqzbI%)zs$L6w+l|6yI^X+ca>=D716jTH9FZ7R%(yfik$QS4w1zqAfVj7Tu&l-R#-3 zZklub84batb8d>*4TioyC_2RbXnQ^=w4z>Y&E19_JI@&Oo8gvfE6% z5SBbz4NSaov{Lx8nSQ*GhDYph^Hx_5eLA%1vy^r0w>e&gX)1-ScR!?7Br>g$?ThW3 zvOT61ZDu*o3f4KreXUr|3ZFUkCQ%5h78$x0evKaYieRCjQFF}cmBe$=dgQXrp&OCg zt8}`_7|nTP^&Z0T{PH>q|6nE2kTJ$aDl*Lyw}B{q402B$&G zQd))1&3d1kuOQlY-iS+FsjunF3KD?Br^0X=jvAHHgO-hl@91Gumk&z&`US*nHiaI# z5+qLU@0d#*M%9Al8#cWn^shDpM1|m~#J403H`|@fQ$J{BbQ9owIrmB*GU`?wpi3<7 zt)=QD98Ie1EWnl&#V{*MpioVylcr`6AI4GDB)4GMtrDLOkZy-70EAbG>n=^-gx?Sk zvB1TIhETfYT#P$>L~_5y@*XI|cOku8lERQ2O%rtMvMUNyh!l8BG#HGiKBG~7=m8@K zr%2@&qc^MRXjr0d5<=CWua^2gh80Oxb|j%P1${k0Y#N$hI^vL(B)`xt_Ve6Zewoq& zR+j5vRs#TW{#CQI)7B@!^D7s>JrV|fNghp@w@PIV>qcmWtaSZbCX42;6K&3k&m6e> z15VL`jC{5_7U3Mi41 z*}6oOApx7j`X^izT?erCFgm0GR$$Ap`|CLb)_jNj2bc)+6hj0>;Axy4oeA{}6WX@< zjMyGe*LxrR&2!bbk{U~Z?Dy)bh_^~!jyS$ibl%VPO^n?wESIx+6lPw9kt~DLQB{#? 
zoxK%s(U0L+E2>618t&*h6h!gttA5&*w@G#V{i~l+DQue?GITsjxTa3Ez-L$iGspac z^Y2}6=2vu+`&y!n;*v>LKB(eF8=lhZWl$&Hcd1w{s0ZS#=pL}wB9vEE;^d2vlY6Z9gZ@y_R_VD zXO_z!g`XGMzM2mm4LIuCj#b~MtiS4fWE65vbF!>dx}JXYXL-7~BUr5wyt+5~pEQz8 zkLJt2O}FIxS2U7;aBKfQ-STsArZ8!V;D!iGF;m(f&gBgc&O_2TOeAX|Qmnb`CQP46b(jqj^xISs_$e{?y8^!V$5<_Z zGC#6mZ{?k32XXkqAeuc<_+6_TQBe z%r)g5u)mq7tbd1nYVvQxar&R^Q;e*PoXiZZKV06a|Aoq17-Wi4B{B||1UC~)EdMY- z+#NN7^a4S`T+@jHM)?5Cz3Cq$#a<+%M)=B+CLbh!BakR+@PoIY@=<{)Swz3`l-Ly6 z#&{6R8#^BPtOfNc>8XSdJ=!ewbbO)JeSJ(D<6+gjJxz3cDO}8@O}wGVbZ<1q9jx*! zt;#Hib&|lDy821!dcC$*rZ&cQ4p!C{#^x5o(3<{# z722Ku--XuBYVuELi~rw+Hss+_py)4@>4jcH!KUBT^ElMMK?O7WSCO62Qm`ZX!x1J~ z?Pv+T-bq3@7^1@6E7d+p|GE)L=OC>riP5ft?Sfe0eCBRjfB9*h3&|$2fC9G}7t2Oe zT9TpS%igCjA6pG)1m_|c!hB*3ELC(o>9RD|#*Rh<+6|gC>1^}6E-=se#BcF!%(<65 z``n|!LeB-1@Dtb5SvYRK`c40Znf>uImA|Lo?A71tHD-@zs_Tk0oviV3=~S(xgmDjC zQIHx3E-;z=Habc;6Vfq{K~-oRE4azybv|$s2wlp&&c~vfNix2_n$c}~4>;GT&RPAH zRPpl{#JTEJs<9gla}yfLU*Lh@o))@v3O#3r)7^-l>_?!YeP4S3` zPGzo23gE)=KpH@u1l>Ph+1w~0m_CUbur`_2JUd;Nj=ME4=(Rq7!tUkHNf~O^hDqR7 z3S>>_hA$|u;zY+<_I6UIsQs~}NA?Y>&i@`7kw-)<8KNwe6fBPlGzR5%w>@6Fw=a{n zEYnxtD5_L54rZC+jS0OVH7>)m1(`tQ*_ZNZQpb`2eoViO$Z*=hr8B!88b2e#8_RNuUW z)X*U#@kw{n&h8LY3{GU}lFhLO5w}KVHG3e6Y5Bp4^?$_ZKBA1gx0c}XgCjxrhr48^ z86I7Txv;(e@bva>+N5i+!KcJHG$||0q@jv4+sOy&DYT=0U1~FZyW_(xD zC$eux1WzUza^Ys*H*1hzC;KM*k)m#iuzp3}l5RQ!Yx{n*)Own3T#xY0k-PbMiw=dj z)lJ%D){E@A1Gy^Fz|1@^EVmh18*)#!>f#&wO1i2S@j)&t9ZQ8jZkCUesUSA&hzFT) zBCjgUm69IwprMGRI5GKyd_wD!fQwHSteX)Wv*j~vWz15ho62J8G`~hw)++8=MT-Nl z$tIoWl{5|Ipbj~mYm0M-H<;2$9iuQtIo(~j1|MWG&T7V<4hMylR&_29_az9*aua5o ztQ(Gxw+@w3#)B3{YndDPkj zdjSjD_{^dy3OCn$Ao$BXWF=&<#s_-n!&6v<@X=P9K`$BuvPdLAYK@@FMlxYz^Xu4$ z$*p1OUAPOTZF?h#4$ns9L7^2WMvARL5tNJz=SJ-Yw+ih^@sWF$Y$lXb^F|bZ9R>7K z69i-xi$hGDE`u|(qFni$DQ3z?k0FLGkOA-PON4JfFv5)i4*=R@w(lr>nJ;|rqVS%_ zA^i4{ZA&KOQ9^zyRF?Gj+q+f5kGaZr=JvIAVTj8;{&wGa2zoFdFR2}SevZ^ez5Zn_ z{E5U(((=1}MBwifE}Q?WweSy~nN<0Yja&6k0Z#j0s+F<9@^b{eL3^CAusdR8q1k*w z^eLDM2D}kU)-E=N73GjXI;f+Ec!XXA=;qRV+jue=U;g%=(n(F0dA9KsGQKqJKk-w{)l!)*XjB)%EF? 
zlv(CmVmMm#ropxl(M;M@SQc3h=NMI`X(wkvG)+wK94-1Zvy60;vqm#g)pWBU@X4VJ z7CfBO9GvqUyG$eF3d{hRapp}fPBt!9Zk{Gi4pw$Hphb+5>>}f&`5ToH>vHAxbQRJc zTS-p^^Ej>J^I6HJtxpAOU$MH@o|4r8w2PYw_nS~!T4}I^AR|^02!z^V{hPJV2?v9r7AM10N34Zti zO}Q}hng4o^@J5y*cP-W=9nET1%qJ87S9SUPQCN9h@m6<(u6{NIHstbz+x)Lo#?|8F zl;rh4CAG)Q%ah~?f&oD}s9K>-KSNkNdbveVCwJ3PFHZeN8*`G|!kO}+Qq5H}-0i0v z(vs6<`=ZO+GIG#pbfvK&vZfQNx`;4gP`clQEM?6|tMfeG{5UPhK~<^MYwXlR=u7{G zeS@d8C7SnqC@g7an9-4vtL1W;sH|g?vrRXiaID`%_Hc}RS5^0NCpzL5%P)jn<`TB6 z4GW;FB)(afo9a5ei#OyZ6^gYZ>ERX*{L3mZc`7RKw@)>e|0+5E4^K6p{&}jQ76$)5 z)sW#7qD1@2SOC=5ki{CAO@5ziLXjGJLTx^aapBNRZ9y+|LQPD=MDiXWmvth=80wO$ zWAmVgS&Secl_ZPsKJ6)|RFgrF5I=6B&;P>UJW(U-3xQUCfJ?|U2_ec0`9_z4bQP0- zRr~ui{~LOXuMhPqx6=Nu7yF-wJoocmXX)PsWYU;_10Y)ct8j3o%AEE`>I^grIB!!; zZu35NpMS=t_0Xi9{IMjH#XD(XNl+eAfjEO%P==_kIf27w2jo;v5swie#>Z=xR>p1q z5UVVJ`=*5CYb0z03`2oPu^9E&SE%zRncNM5&hIB(cP8<^563$x@tvx*u>rgFJ^uF# zS^;)$kB17k1uO`ppIhB`?*eZ!djfR*=vYEt1L?IkjunNT2lZZ`Gcp5kB5U>7QTo|SHe-Ku&XD{G&(BZPEh0(WoUzs&Y1A-|uiGkR&e zTK)cT^7_(p@USxEQClvU`mHo@%QIllQuyJ={&}F)#gE*kjVHz3`^npum#a6zm*Kfu z-4}C#m&>(VwH5)#U7p><_uCP#x0%xiFF!*)U&*cFx^m54cCgLv&su9%;9p9OZ;S#i z23c&?Y6Ib)I!{7hpZH(bm+piQ{Ll2x{SJ{G9bb}I{eMnq18;`DKh9$&24wl{P#Qfv z4Sl$IWPW;n^}fsY6o&PGM%V0l_cDHd+!c7+wdK2X9=Poouw$v1eW=xQ3446b{evao zxb^e4uYann;DwM&%m*)?MpCCqDOCIBUHr>fqhZBYRD z&@h@tdJ6z$woijB^MTD90_2eHmi1TtnUeO7<`+av>q>r zau4h53Erec&?&oM?^!_Cfq92Wm}04pH&Yadn$3UQ0AB{a(zT4BS~lvJaqsHKsLEPn z+3{ae!Y09qgV@kW)(8Xym@(fqt|$Tdkf_bN2Iyj+9$@^Zk)f>jmSIv%;tn&+*rz4o zg6)Kd6JmDwfer_AGi+Pib_c-=89MaAsQ8B1NzH&aYEyU#!QdGW4Q;J7b+sH_idE`RcFnQ0n|@VPs&un203sI%r#KBdg?KHQr1k}y=4W{E z^Kdz>AV?Hau#awvC;)WOC6&Rowz+*hW*t1M>$CPG9Q2BGWX48tyZ-TQO`ex8S<@VP z)|Uqfby>g9uu_=fi|va^KhgznxPBFU1P@@41^8^v@I$HvvAE^9S94cV= zX^3fw{KZ%N1*!O$kWeY#Mynp2e%UDJG<}!W;gfOXZQYf=`yk_O3-JTV(|g;&5G6Vfex9q=h;-wHVx`i=o4Tw7{I&Ub=oLWWVG-mBLCh z%iY8P_)1h)isEh7&yYcm;pgxs#$RiJLQ(=|$>LU?#>CSIxWw~WNx6Wu=9%j?C>pMM zsv|$Bx24I@wBj0g_QPjr$5XM*40KLn+OI3|`^R32Xg@6Nbw~{pVaz|K<~mKv|B6bl 
z^N4w#Eyt60h+Zq_spM`y-P5+0&$M-bio>$Fjq41#Dsfr)u~3KNwH~7oM`ha0G{5G! z^;E*jL^tQ;ISw3@nBdKBMX}uZff9l~LSI9i!n{K6p|}=2CGt9b!;}j<2)+(LQLmZk zlgAXd$jtZimeRL8^D~x24L!Pz+kX#zECy1UD^ZQJgu>auOywrzCTc70_VU)i>8o6|E96ESla zf83c(##x+IE;7#>Z{~AM%v~p?Fy@EgD?`W|UTMDQ2dppgXf z;W^SadU9`7G_1NdSYI*>9aYPY*}A|WLlQiK0lGfL9^NrHQ1XqAg?{LJq#HCeB$w-;ku){$URpIO7z$}`F=N}oSn z^Rz_mB$$h%F5}0J3W7N)IJ@QrayBOGEBc&Wt6ah>i)DZM0;%#5Y>m?RAR=EY`!L4j4{rU$H>BSBX9CL#tCkkOG0KaB%E3F6za#}q|ihWWlY^Wh%iefz~g|&z&6)4 zE%eR{EQBa_?;EmMo%zK55uNA3YdK!1?SQLhs~)d*tpN8R=FnGQL+X&C$~Vxsrt5vP zVCP53=r`T;z~^3da@9UZ9bhp~kPBgIH9MR0Ksgn4^%a&ZQO`m&j=q43qaD^p}Fi&B9LV3xQdqaRYQaa^VoDWTA|%A zAznoI5fQ#xDMwRkk1mUiub3?4))`s;5?1war*rm4bAA`xC!X!9eCUJk?1e6Q|Is$scp~ zDPEu@-eht=1N#DGKnJgDm))dmQ65(NECPAE_sP7g&~`C5gE)#>Qr)0&(NVV0;?LQH zxkn>xM|9y2Q^jy`)}PXvCJ=P1<_)-~i9O{+8nEgTV%r)b-80)cM6$0SWx5%j@Tw_V za~Gp(Ve(FF)M8^KerL=bB0dm9d-FGO%=*KTqD!b#*k~$L05yNf4UAUhXA;^fFyyTB zDG%?E8|CcTBFGhp`8?)zjx{XSvNXyh>amyI@Hg(pPCDuOAVDa$DYoIv;$AbJMM3H) zMKC*H!oUX;-|}_id0SIfZ2Ndr<>8dlGEC;|%-ck$o{Gs^P6Vt}opp<&RECb_0^31# zx)!UMUo78!`*6v=;9_;ea)wY({D}!CH8Y}QX!uwaS6?V7JtrH|rbfpsf`^vqcr4Y( zaUVe_j4r682^ifZ@+&RoWTE*4nhZ__tne@txcVjszU@fN33oqb?8&Qj0q!j%7F`Z+ zLC;h8I|OY>=`UeUCT`o7M3u{8mwh>9!(sipd5Jn5B8`>SAQJud3l?Yw1#SXHj=meYtL0m^iWOf4{@e3 z&nH#Ll-{cn)iqh=L$+;)(a))Ffm=89(hZbkR@I&uJ-`fW0R8&a0jfhT|##V&p1ed!DsYu zHc#ziEpp<`MK(Z_$xlJ&lnH3RCT> zVneM&NcEk&Dgl{dihCvUwv9GGwmbGz%pJS#?KO1^rDr&pMlvkI8guuOXTN-&%lue} zlbVQD`Cr$R6LoCL`aYx9XRgJ~U2rTHh^th0Qcm!n%Y5cy#ANS~XeT4f<>fw>77JIG9>{?$53C-8o z3Yi%f(t3_o$nFz>#S!Nf zqhupv0jd7Xky9ZYfbXG?DQUMIwCW~zZ%i-SURal#0{^ObM?4gnqNhdg)0SIPKPc17 z>Y+=zvbyT45`>exd3&!2!^(|?j0y?*KS&#x{%8=MlQ4tcuUO2tC+_ZHbBX?Wha~b+fBi(jUhI? 
z6=vNgTWn>vKI8IrAjSqFGiR`wWz+-7A|0kEoXX1&npic0-%;D!hu?rPK$QJ>`pcj{1rT2y;qo6mF|-UY*f+Nn%X4u$o4qf_l?|M(&>kY z7-1V!pQl2Aj6ZEYK8WCWHX*d|@%`o|u0hKAMYC@Jk2o2dNF(n=AMtNyzVdpLlL5uF z4mHtzWp}?yok(kgACWCWw$eCn0J|mD6>cWXhR|rAd1G_XOY>2k@qhdG&NWVFq8$QV z6vszDirTM#WWZq?^0 zwq}MmR|(^AuN2yJ0g;;Mn=*Vyx4KuRaB?39^$_4EYR%V7X|Yhxb~|WRP87HhcZoGQ zGms@(vg4YM;zG|&D;|?Eb)P$In?Q;Fi2>?Sabn2y+vY-W{*AW%E#p!nhQDLi9P1}!Z z3EI_X^R!5)M&xD!FXDh3X>6Mue`iGNWQ3^#3=+)Bbd*7)v{E$sj*6W3#T%_zm6N|{ zFGRn>vIyeZ*T)f%EU557t6N)TlbfKZWGg0P6|`vXLG^?2V^1S-1DImEX1=A;_1>G$ zz8_|nA*Q~;{%?Iw)V7a7`_I6Uf&FjjbBq6^&nHzkt#DOPd!8q8@{W9>UnFt&0v7N zE6+esiIA2Nn$PZObv;jQIIwrjKakC2PE4hH@gHSvXL(I&y!BzPKTB3xvQL72CUtci zv}8014OSAiuc0h1s2SgGO9}Woeod^;+ZB*e`-qFv$6#v`n|nLO=@AO3Fs4E}Px3TV+U}O+ut&m#%be z*Epu%t1f#(Hd(giQc0u|fJILj-}1sC2gkMTD>C5TNP+A!=Q&5SE19yZaM#I7Td!Eg z$yBE}R4oMe(pRVJvEsG`+Dfuc%)A)nqZzKRdjhmio-=H;Kx=8G6New(9r#x0x^?&# zXgfUq`brNc<{ERnTe(_IGPf0uC0>;6dBQJB5n3R?3l7#V@G)l5E@QQ9005cQ%@C!u znwHfc-49?iY$!snS!O(@F_j&Ik>_A0fQ}Pu8-w4Vf_s)<*AAT@zM**46Nc@&b=vnK z&g2uu?WviCIKETS1%8SRZ_%lOC3*JS7xqc4LZL;gA(w-mQ!4h}0w@q2q{2%5MP z4?A30LRR6snapJ?;eM+(QQM~CD$!srzdALY+_(TUEuvs#D|fH{GYHwA;JoaHScnJP zh6j|d8Dz2@W^CkCS_{Gyz4L`Nve#nNWyL1gnZ8|q@#>5LQO6f$;`Xw$jc&5K)?#k7EVx zPJAR=8B(`KI`+25RJ$Cu<$Z)uv>aMZuC{)EAB=wFb$D5^})bep<9E{PVzbIrLZa_^AOD(MFadA=vw3ohtpsi^XU2 z(pYg$N`UACD_eAs=$^K{L+1$#2Zd)KKtm0`RJ%_hO4wwqD> ze#>9yZ~uiSa=Kx3R000uhdc6rqXt?2#~P%rXN@X?nVY`ZakWX;H777PClr^WSoE;c zwZ7g#l7U~0Dj08L9{DkQfAX*6pCotC^ySUPBjPU*?gY(3R`I{&kxP2dLKt~7E8xel zk^eyvf|LU#y@mQPbv@>$!?h{EJaS~|`Rq=&>v=uOwCi5Y(Q_IwwS0KiM4c(t0(taK4atkTYmB#(28}BH-*D zcg^|gyWO6OAsR=OT?Fvi^&j*p{x*w%EBgMIvA2lNT(V($6L%$gX>Ii zM&OXzCb(O(^sqNek`A(4~%*UlqZLV+`v75e?>7fmLIoH$csMKcCX)${szem>c}~W3cj6 zBvli$?&xXAT3Hmg8nS2ZE6txaef(?nW1ue!ZWEeJk6yqvoXDmL(WtX0Y*r zpssQV>VdE?>dlM206u9X?WF!;@%+|rNT*>Df3Mi-J>@Qc9MDe5Uc6trG){fZY7Ej!h{H0v}@zzMH*|@26P*uf?+?tPbmONFons9TL;i_K!?z17i zcHeCF9t^Y*V6A? 
z{@SA2=`!)N$B0_xpu`4q)ftCd*<+pn2U_!5hw`7$^wFv`SybY@7huD=)2c%AhQXF0b=_MO6BToD;2D#*;ASX2P^95F3; zp<(u&qhYcg(=eXdsp_+81A0%&iWc9-vyfUu<#rMNH6-ZnDL5CsMunTPat3wq;o~g< z%j5d>AT<3u=REIn?!;HIl+oH^3H^?pZ4vc3! z6yu#`Fv087m+?eeR_JRsTzt{_`?Q3I10d(QUH)ptb;UC)>ZY+iBwx;GP=nBv%!e)R z00J3%y^!D0^w10eY8K&|#;dMZrv(;CCJAmNyQV7gI%!kWL9x$kLxC9rGY4l{}t= z)?m#CDf9F?7`k~c`x_nUitmBRXlHagMiO+zO1FIaw~VPj&d^Pw(}rw-%ocek=O05K zKeMAP8jId=RoUju{o$Er3``zyub6+bkx%dIXEqkC&)5~F3=Eg?Yb4R8=udXl5xkk2 z5aC8Ou>orXhpnAsFTT;UOqE%;6Na8UrM?fyyn>+1>ZjQu`&h=3vNDCY2ST@CYs zMvi>BOMj?)9y~7G&yo4Bm{FIgIgkH2I`H2jPg?z_4*36Q8bHUu_soaT0K3$NSk_vOU;V)0S`nxX)88U1Ui`pYzgrNquA|VEh8WRjTIAatY0#S*gr zH3?Z^VsEo{gY{oLL&`u=b>rVZek9oZH{Ju*|0x@nU4Gh0$Jy-d%g??!9+;s9-lqQo z?yv!7m;>(c&tW8}y>OD)@LgvryyYUyUTqXDu>OP2wK*|5oc_RDzW$9eV$hf^$^0a*D89O7c}hI|sw z4%y_0jo=GM^aelBPDMT{ftVq<%whfpIZ0s~JG8UJpk}~(GF@#+i#pyzmXcM+7rfQ%z7$?wx z)m1fn?0nhAeoFuC=U*{W7jz>?IDq~P@0}%g2k^rUju?pggiGd69wz~qj!Lf zzG2~(fir4Z);r_)Gwz7Y^SLP1WTH(aOj?r67jofR=D1yXH8eP10(o*VdebiK0@qUq zs}M>*=M&+5HD_dHZBCypH*K?F)?+WP5Zj=%#e}$5Cgpkw{o*nKxKb05l8w!<409>M z@2cB}{A^1AST#F)8K$Y|BBxXhZ@=SO*$3hFH(I^ATwa~v7&FEK8qZ=i5ARd8X-vrC zf;P}rU8c{ecJy(Gi5%|t-75&$yZsM*tt_?qlgjca0T#}eYh28F6 z{P;Umo+y&mvAg*U>v{W==is;txGOP^ySv9F^8KIfKTIj-;O2=o4xsMhcChbXqT9(` z;Zk_g^v|4~{vHMPugY{$Za}QlTc-EIS&IE#)O3avohM|DW!9PHN>c&gkwFl>xh4X( z!PZIjhH&dSA!foJkb{m$C1F_7*9$5b&I5XgJF@Bf|850z0qiv$u-fJq%C>+Rn>xx1 zJGIWl9aPVV^nxxyNn@G~gL%#A%xitl$>dEU8zNjK3%;N{vvDA#kxTt!MnbPD|5E5a ze=hGB{v=8%S8s8F_Gh0j>yLpg4yjILpW;DFSyHM&!+T{{hH#oFWoas(zfJHOWtxHo zMog>}rqCLuP=G6SZXl;9OY5&?#8pF01ftDNIYHzLtC^F zYvh@ebB54AEW(^i|KZrZ$Fy)Y%~-n78(vWq@MM=xpOdDHLV-v2-JP+@v-q=&Po74MFI7hSf&$XRD>TUZRVtgEUl(|^3giP|Zxja+F_EOFr3 zkQzs*7KGm}#gju|7cG35(=dDzJT57l4FC5fp+FgHh(Ug{J7%f5avJ)ypdMogaTm=2 zlch3|u5?Q!i+t)$u|d3Ly)MGxV_2GAki@CxPIF}ia}$t&~g zToo4qefb+XKCobKnJd#YBR=UFPdhe<>mlq36^^{m_Bn7YlY%SHJD$d0^mFcWc#7ru zwuoKv6o2b8y@%RA@K3s0c2D->d7~V|w&jycZncF=WK(IP3%{Mje1egU5R$A>xyC9yPZNZY32)&%+Q|P< zb-X~M|J@9GmsLWOjFrw_o*R1$nL~Wb2>9vL$_RJWFcbc6V2&HGx-M2uV(#ol{Hgh3 
zpK4HH#T!lJs!gjh8408<%L*x6qlF7uU(PNVa|-WT8FyQZH|#-pMAF_bV}ifdfi^Mr zoUiUrSqjzZ7s)3cFI=5o+Zk(qHVA~jr*qZs8F|pvL5RSv8{&`l_LbwSfs5-*rH?m_ zmP2#RQ5-U0zzj|(Zli6NP2d%acf|$i$EWU&#D1g#)$#~if8Gms4}Q~7U|1j~VD8Y! zi(lm))$z%YcsA#dWO;A_4`XS<0~z5DJNjT=9lEGb-tNXom2gr%p@``Ymiu_e1$n13 z31=2fH_r@trp0n1KLwQ$o*%qc)=J#3zYK8c5GhZo-%Q4@%O1{qMXj&bVgjV?h-{7< z9HAb%6%d-!b(EbPI@LFd0JF}1=hPJ7PzB-cZzxi{i1SX0DVtvTG=@KACe2Th$IpX% zc_goOh+Q0yaE(I?o0#pij=wBi?SVpx=Qp?J%ad zkQcNw;C?b+plj)R^B0`arY+w;lFOpIyoqxL6z23ueCrZ9MhG|2!LbPZ+ENuTy4OTg z=7Io>X_{_G)enaAa9hs8oZ%!%e_b#{fbyj$SNlI5GpOTs${0lgeESY0ctR^OWbe@0 zh37O*p8j?vo1lo{`8eS)L`pA0W?kaCi2co+kK8MzYMNk~1R*h}Y>A83xo(!eyhEMU zPljKlM7sXajWfV0vFpwiV6jVybAUn9Kw2q!Sbq~r63b@JxXv{tWq_IPU3iNASyQqa%tnm)ER7~PR2c@VN@uS45Xxv+GW z$cr%fPIPo9F-WmQd)TxOy19dcoj=#GAo6aJ(5B@PDl2ab^x!ADCaJvpA&tI|l@_Ld zvzAPrTn=*fbM8_|_A1^z7rq`jxkzr5XXJUwTBgNhK?T>4BY&52k~~=9f`V(=HO{U6 zHFBpFM2#SjpUVugeEl=uIlGkL_P|MX%fA{>Ex#GFA(>f^^HpNFE`qCXDY5^gS_W5v zk6+|Eb^4?T!Rg`fq>u0@nTY%3TGvS<9}?|qM$a=~mFF3X#c)T9%{s<3#q{cbt1LQ0 zU#q~0AY5IGnvOFUU64O}#aP)eq^=6lQ`OcQ26X3$t;KS#Q{AOqk>U633b85st0>S+tg^J)gMXOcn!vi6UN|5*K(-{>r=zDE!$0w2j#NGK0tX9kFlJZ^EM_e zc*|D71&TXJVp;<4j>~D!Iq100f>s<)sA}%Kwh0ruWLJ@gx1Aiu>78uJG3;4-u$Okz+1el#SxZ*^AmI;*4zc0*bMePl6u zo8WQA?}~HU?WRLwMFv7X=hURaKu3(j7(%1KIMX)MK+Yy67=eec2Om z7y?(r%K&rVYX^>YsqLL>8O|h$b9^Hv;Sn+2o$i)tq$R^p)3F zK0B|_ITOyYD#11Skb@~#Zj5J`;1E;<&sNT>Hq%Ytp`>J0#T(|e#L7N}ZA#un)iVE+ z5bQ0u6Dc7!s!gTCL1*N7ZJp|!Q5$&ku*XXcA244ZstbArG*Br)FMb2??_1PQ-_5&{ z2rH>&mILgDpXP86A)6cz@Ism%aPf+?Aa8EZ+kYH#*0apn5>2||n0a{U`WMZ)bLRot ze+FH{Iz1_z7Ri@X;D=6IaG1vm?h4cxu}@7o*+sBa_5Fivvg@O0Ounh^6qHnAS!7c1 z-k?m$iF5IZgD(%>F`V1Xf+CWB;+Du2vlu0<;0Py+I5I>h8U{^m45cMmVQWZbuQ6m<(R;*>9atom+ zbfip$;xwTdR%r-C*1((fvU^e+I%<%Oal72|hv1u%adCC-tTDER| zy7v&Vqt(BOX|iEqA!}NZ0B-!+dUfK7h5r=0S_z|rSeM9&hS>?)1K=#`_uEgm3{v;_ z#uvKWk;CM6XphA?(5>gUN@CK~m?LQk)!p%WLz)Fb*iQ+H3Z(;SGa#34m~N`0f6-V& z7EkHjX_+063tkGCmE+jD%ef46ZVLsB&tQ!unaHpu3mcn2j8ZvdtQXN^5vwTZjK8+-P0`KAd@As#5Xato 
zHL{pkY{{_q;dn)F?#ei)SUy3Uuwa^0jHS@)t~>{83;Qb7YTDI#xi|l!h#yu7H3?LU z99iVqEk@FgrdsS6$y>3;ej*jb5l_QW`le0P zVLb8R?mP+SI!)D$Dpn_2H=tw69`#AvljWrCQ_0yIwdoNsU-DBKc2)y+hAv^+zAInB zF`GH0vol&!iKCtm1IgMzDTyA>EapAFfnew&=%fhS>FfbW zqo@UK8D?k>LhHPA;vSa4(VjjAP1_LEi=g&CV~OoP&y|n|#p(rrOl$+>mNdmnxJyRG zx&?F!Zh4~vW)rzvuTFbhJ+N4dC1{Okmi?!T?=9ry3@hFrgE2^k0$cj|DP|N5CXy^m zDyb-7i~*N;Q!+``6u2oX}AZMI86V6Luv`KqSeSPG+ zcQ|#Wb!mJ*={o9iAjWCuPO}n;UkZn{s@H}gsp6*$i9wbw zqsV8$hm3gsIh%z}SztIt*88;0atX?VEXY}QwL;*5`!Gb8!& zfCAR0YkC44)k2jjC5y&^NV^-N8E~^r&_~Cg>;tn;vG$?!*I9z5uBk^$Xl{rs5@8y1?eZMeIfV%PsY^Js;Vp4LjVa3&&nm8($u@R}IuHw5Rg+{2`v!IuKuFD(K z+?ig;4Ju_SEU)h~Hg_-u8_1f=5+0$N1hA^V&g@M{Z0H)y5d>>+N5rdHq9`VLm}#hD z*C32do)_`U5|FR_obrfsO5pD!6UY^R2Wy%C%MbP!#8)BG1v^%&^MR%Lp5`+tb1i5O zR}4ywT_IOv#3N-YymG*)_pFwz2F%GC^%TO_ZabyZ|4yHAmUxSPiIS zwrelLI@s)I<6&AR61OD}jSkXAUzv(;C!`am;Nm$2rJkyLMi_!A~5Bu8`_TZV>u7Sl|fk3#R$U9(XuRgL0Mpin4E8+F` zFgIA4bBw9FJT4ZxA+5;r$az&JmWt6w_KF^pg-t^zp4qCNJO9b_A#u#2ajEj5{ph$Y+LHT?hIvRc7WQry9 zgxSg*{l0}DWMhdyb9$k-K>G=$orp|1(>0ca6^x>rVt}E+jExC`CBl8N`0qS`4K6p&Y;6)G(67Q*yvxIQVj)zu*ay+2R zWtI~8#z)+d&PuX zq+p6%oJraERM?+t%bv_mgOCVhiOD14xn>u7r!0NNB`7pv)GpN#(r~v;Q?fa(+sxvp z6Q8d=e>Z0k8v=8>iuTe}jsyw_1UmoVm2Q=q926kp2w2D`W zcL7}+EiT9PSZF6lq946HBG^{rh}36{s~0&dS^lhx zVE^{u8B=;DeEi{HF!_E>%&-+@Abx@z`Z7_gN@GKj-;Drchp81ATl_Mqmd+)z@ZO=1 zOD>i&m1OCxig_2c=!Ix`oFD>F&^ZiuNE;8Z!X;{jHAOI&oLEkq7t?pLiq>7ED&roo zEh(>4)iJa!3^fWJt5DSyjSQMrlQOvBoh~ID=17xn}OCeX&k-k44igjPVGxz)_r#Z$nd<3|2IvK5C3$#B%}N zX8L~WQK^*jh{yzKTT7jmutrrwoKnSj8@?d`vran)=8Ki$OHs;M_GGkCo|RUAfW-kH zf8h1=m1)wt9CQ7gL3@FT6V5(r5%8xU&bodaHQMnc{)GL=DQUo6cEKx~9$mckDsheg zQT77$qO4hRKo~3pI3o;;Z_+-9q(4s4y7Z>ZR@qa&zmDv|7Kb%(g_Y-E2;Os2SZpqI z!YEcpO``xosd@U81NgOLI0G&W@*mQY3_N}V5xMr2WE5OIjBfc~lqF{`x8V>?3#VvbqG~ApRP$wDtzi#8&En8ZUP79+QerdJ)H>zgZ?jwhW?tm! 
zcLHJ4siv%p)r~md1$9m(Cj}&BmKmq!7#yk$v!LSIfw&@}uX*@FCH%s)3uwxhCL9hw zk`YGaX?36woV&+2cyNtm3YF~%tYLC{#*k{GW%+s6<8?*{iR6$Exf|x`RQ5C&V|o%< zN{C))$V_R?88`Q=P_eAK$O~_OSsPuj?my7m!{&fArlRTPFQoWRHM;iMHKKR0EDqrp zwOW>#b1-YRdRvjnSW2iUh{%KpqQKTluWBZsqy-GlhhnKxXCL~^51l)~Ql*0c)Xw^` zh^j0S^mqmz$5@?1KxQyYcWPb`;{F?DzuirZ{k zZ2u7RXczP$Z6~zJR9+4_ZP0R1ohGAsjojAtNzGyR;kpMc$}Fx4e(kB#ZMeBZ%gX7N zax#l-jfyX&N!b^Wzxp)ra~)i>O+ab5IeqrsP&iMIo4P;w87{r?NAP|3$4KI%r!wk6 zAS+Z{3hf^qe~qm7xk~FB5*lH@vQ=Lc7C~R^zVFGWZ~>93Z*d6_mnqjx9D~~uJ23`j<>FyNs1i7fpSUK@43D;9;IYV2cp$m9ogxdx zRV8(JHYy~GcUk*!r+v|t`pIO?SS+Ka<#iPxGXEHiU@@|at06)cc|^sCRE^ZQm>o(f zUP2ik6&t$|>*XfnOu&D$X$xQ%9S#g=s>t&TuiEe?v5C#5ZHJs`DeF{PRb%Q`ETl*z zO_nP2kIm9aHx#&n#@z{~HN}m8bV`af&kDd4DyNOt&88(@!{Q4?EP20?)-w zHoBug4615qo#N-%y1}z$jK(B8{{2I#>>h8^BUhY$Ji5Fh^HeE~E4aw2v<~b?M(Fl_ zpRVEOf1&vh_BXRw`c~FXj;&$NClz&`u*5ZR`$Z!cs_BA=(#iUi~YAyZd z+~9L;^wi|>BzLz_N946}eaRzB@nj?YM%s89G&PXzp?9qWEwMDgyUC(iY28HMGUiwS zXgEU#JQ-_H4Byn4RME#%th+EhVb-Y9SRMKU7T1syn>2G}F$(9C>s)y5IDF=MjE7@I z)-nzSHqu@`jl2g3@Vq#@y(+azT`V(OqA625zk|JIl3EKnUxMz1{27SH9k{ImRWc11 z??SBWaot6i);o2x)o3WJx;|QH*IzB6yg@!je`Z^65^A}8lxyNieLM(z^*`>s$B}(8 z+ikkBgS(;Hynh@^Xv|-50NQ9`n;cTk^DAp1_j(>DFBp>FZ!LsG7;7G_5Bz z-Q5|lV(-9=M=W7h2|kNPE@dK)97K&o8HeGDL#m_nk88g2bO!}9bBPYGxm~H(?^{i! 
z^$lbTcc;x{ZeK~`(Qe(04s8T?t-I!IwBEP!?OrFiy4t?0Qr_|mUIobR+5N&4?2Aea zi0GO7h#%YxcqZ8OiTY*t_nG?FH~cjZs(W^4pxV*7SJim`xF?GTHF8(ITBR0^m%#0r zbr&ya`W<&i7UI2WV2a%Y`c^i_sy^GOW*1=DG6Yno{$kOhYRezjNGreT3~&yJ8;@uX zPQ#>`&d`IWmAoBush^}jws4`T+EpG3205?2n;8E9Z@#JEMb+)n)h&QSb+6*qn66nx zcJ~GBUJjDq_uja8Bsg~>T5o~cbS2x#Fw``i2a9m~{H{y{V%3kXalBR$NHq_sCxUoz z*8rL{tGK@?x7!vo05S8Ssqou>Ds?Iss}l(ly+WGmnEedN%X2~Ni(13MAbf>U|5HON;W+w%M;y=2d6I-?J zyodsrx98WObpmCEqXg|_%A~|)Q;Z1qA2X*xkgiVbU0*wH2)?3dC=1#cJkCP}7{gJ& zoUiV0oWS-LdqH(^iHVPLMvks-4|5s+1WTD+QD=H3P4eBagSAu52nKxr&ddGvyz7(r zi$*|nf@fhbrrh@lMN+9dhwI#X*b%v_>w{?fT7<|Mzw zZ>aQzp( zV5qgRYG^W!jKQnfJ)ZrBk7{xBDj^we+6Uo#qN#3|^gAUOi#B_xxwPbOIixW}mz~hk zkkpvJm+k_rG|}R`;?=RZhoqHHwuk2}(%4hq0b0XW-gdutD4q1JTJVq*hZA&{ zI)~w1jdLuguJl)Ix(f>YpNX9!%iVH-px%(KB|g2#PIl4v=H2EAb0fttNf zudcJVKkFb>(QvcTD0Q}J_9v2|R#`b|$gY^7G|GlAh%#q-Ipgt+%90!U?Lj4Q57ez~ zqY(2~&%5In98Xg%$pS_;ghE2sG36c_ZN}1m>yt)o5n7w%?s00 zf19mCf2YZ|(Z3zNJ=t4kJGk%n!vT}U_HTdQfF1Ido0E?}^NcBx3OJrO8<51Mx6W_v zaMwFx4^AuHb;b?-W!^UL1!9V0qUMM&+vH7soi?wL97xw|P{S;Cbs|u9z^r#2YhA9$ z>s~*Mb>?b9*|8RAw9BuC>9{u_E~SXAHNskIJ2>TO&s4+49o>a5mxxudU*R^>+T z2u9@2LXLUNhr?dj43`gl!G|j_n!8OK0Mgz5YDIAlzd#;V!ouxG=(59e3IP2aFFtO> zES8u|8a^*t&4)G<#87c!c8J7{w@2?II`>2!L@&&XhK{%R22w?;B+ZG6#j&(rqv@|N zrJaqPVXvNSBD~!f2=a8uR=Q!H<+VY<_@sv#(Bs6au!OIF2XcwqavzDNt79jwhj`>S ze$i}29n)@;-?BvS_~~n`QQWK^bufn~L0Orq1zv)A9ZtsBfwA#S_q%>SpTs zVzS$tW|DYfVjXZ_qAQ16rpS3hg(vP}GEC8C8EN!@&4k}^;{GFZ>nJSPos6uR|HV{- z)b!9&wp8`@i=|aWDO}e`=OEHo6U#&s^Hrz9gGiRaIiQ$VPat=U7-W|KvpZ_uoi)IQ z3;Ga3T|?DU%jOkpbGRiw579UHr}jX7eX-k(lDF_?iMnFD0eXboEK0*i;Eu$b+gET? 
z>FmV7wf>;2=2P@8>&*CODvmR1UH*fQ5tGMok*Y0FR`|H6fdLq08gBj&iH zmH^4<;mj#XU(-gT+)3o0;*(EjknHK}%L+$c$Y&d@}eyTq>{TcSerP(}3XD4cRUN;hr zqO#Rx&=mC_rnu1pNO`1k>?Ve)AnB=n`+6+a)Rx)rQY#INm5vYG&6vWR3WTiX zX4P(dguY~*|Ju;{j&@>%mhcOn6qBJGNpNnxQ}4T=XK}vt) zirtgA6jEsL=2%j#I3$s05jNM-M<37qa(mMn>G^Yk@u8&=oZom$%zPhqBU7e$w;e!c zi%>Q4orZo0oyl%yQ=8t*xXhsNDo!C_I!UK(H&R{*mkG~NnmMYxGHyY0!J)hLAVmYnv% z*&f_`U9}Ec?)>W;EcF1kVYNq$e%Y5WV<)cN&8TIzl95+W(Urj&EON@Gm-qV-%+nl8 zPVd3{H6;23j9`zD?!HD6>-R?Hj1Pj<(Khh=r3vVddCuvfZrEm{ar)%*U9od zCv#Ts{=IkLhCR~Aq0$X4SMdp$ha&(LjY2g-?8Gcy^#lH|LW~^6TCTUrtvB8yv0#fm zgz9Bc>YVbqid<6eV4Qm+Ne?<+u3>t=+)Xga4OS^^^5FT=caCnG41u8r1~T=-`g)m3 zX!{Psxp0Es^4=F2uMaQW*w_f!(gLP~!3jiv!;{_WA=rOd+Z38^MYCP13RSqhC|hiMOo`PI$`@Q zccAqhgzsgc`{?xo;VF(nFgFm!dP^kZ{r_|PCdt6Og)5vQ6(ysLM(!%{pbu;i5;5Lt zE}cg~W_L#~$^CI0Dy2%VSLbfkNmJN+8+}KE&1v^n^qIYYI0@&n{-3q9!#YPj)}TXWt#v`D;V_f&Z7iciTi{{JAL61 z622yMgSk?T)i4=7#r{eBe}s#Bu6M2QLzu!x%US9_;b-m*ySbcDAN)sU+bkq63|APB z{J}rnIiMf!6Yq*ze^E!nDs`E#4nHp6eDeYPqAfLQN%eO(Pl68gi>I;N|uy8#Vsag8KkS{JTlL= z>Ele0`poKS^Q`?+P+v`nG#Qoj6l#0$NlI~ABli~8`NY^$Z5wDc+zRapouWl!kr}}C zyB{9K`3h$Y*?&lJYRzD)^wFzh72J=Mr;kZq=n42@V#6oNjGIWk96FKT3R>sBK`S`2`eJ@EdkRO{yy zTXou~kX(tBSm`Y0Bk@0&d&lQW!?)40W2=z}PDdTvwr$(CZQHhOCp*bbI<_%= z-cvPG^M085&D8mD{()V!SJhK>U-xr8Ys~|1FvT=$(`si~sZp!LqfwVe!q$rZm?%-q zM4}tgQ&t=7bR`I1D7cT0DA!By)x_Nv9ym!f%M+IvAvk>(Vaq0!DK?R(TP`rqW(LR* zH?RDzj20C%7vE{{B84p0LsObJ0ACdT_xq7j&Z4|i6~f}l1{&e+%(W7J1l5>m)sf1@ z$<*Yk3DT)$3X`#Xr>c$a3e=Qj3K!b6qLX(Ch4kS$QwquB9ZG9PB`-hqgEweL8BwJJ ze%Je>Av#dtuC=^KlC4%0Dy}CqLAJQ*BslvR1tei$S4YSil`NyVxCWtO+8W^-klllz z#KEUk>3$?rZs$o#YgBLDIyV_;4-DB+7CHkoDDE#x;bgf;vtElps-p|MC| zmp6+89s0*oGMq!Kq_vDF&RtrZSPxX+% z!4DY*k<(OBwSt3g3Nbq4at_poMufj!{tY#K%3;la8u1i)$;E; z(GH`uQFzsQUPA>6!?eL=%S~EH)&oG;(%(U}BbD0lbk(gQ<>Iyq!t`lO7SvN#V+P?- zk!n>DP7#nMQHv2BNC|8G>5F8IqBPYGR)um!JGNoXG+9u;6UYQ{e@r+8(a5V*u#y3l zHD>vWyZ-dL!8KaYLkrp_Bw>koC7I+qgwKx(JD-XM(*!WA(9-CbFPM{!mz7W$lTrPY zMb(v?gdCRWiIbQmOCU3vTBMb#pRfG7`ZM#5=!3y@j0-hwRdU|z-^D91_sJ=rxZ-J! 
z-8R1e zpPc_TN0lywPg}(abs3|8a2F|n1uo8KT@?+$NJCVyRwTS`U+W%QTHx^6Uk2`~`nyE( ziQaJRA?ab+sy7yu9{;FTRCF(dSo5Fr4LyV`{t2ZR2RTfl={ImBgq;Cz zDWI;r>MITZ3saq_1~3)l?{Y0HohzyR=j{*o7v|nLL~9F&0ogXcb+xbNp=>oaDQ}|} z$VVBzY~yI{Hz8rBH3Qggut^?f&3m@XS)TFvJ?#6OKH%af9JI`Qcm37=teQE!gj50+ z6AK_OtZZ!05Y6@ggN&(A!VPMx9R#1Dyg$ovU?P9iK= zkzji!zTOiQtH@h+XT!qP{N^q&OZB1>_DA@jryD2?O%<%uRQMjuNYTH4jR9g=jo7MX zQR~*GsPm%c)_=k+14%oIN-)3rRetb2YB@8aryIPjrUK+Y!)~-+qXhaex}}&XLGgXb z^}f>q%~$(0+q3oW$yAOzMeihtIxQAQGQv|0V*vd{ae6Mv5L>}^&~oy~!dRUO;v4f? zifn-4Zmx+Q1UI&Q?SpPZXzj-5n0&?|vd+!#xBQgQRRkfiQMdlW$ytHaTcq>oN&iQw z2TtXs#w`l#ukOSg%?0UT$0#6hI_FBAp_!tVYe#UUS8vd=($I&==(K_((|3whwX==D zI`Cn9VIX&aqWCOF=Bgn1V<}=8rHl|kg##32Pm{XC40&O^H$HYx{Cnz+rP`h;vv;cQ zVKnSd$5`+-1${1@Es4Vwa&=!=1eov}d!O;`U6@Mef*T8Q2X^D_ze5!kZu5-r(0H*YQ z%ElRJN7H}skP2qvu|>FaiODumufBtAej<9ij}PGAdso!*!TgiBnjdryIfEcuL>NB9 zyfh!mGrPY2><-ShmAvvxWgMw@-)|LIHVHd`j%XepsLVep6t)0CCFkq8sHq zj!8?}ay|%|EnYIQBmz4R3>x@1Xdi)d%;N}8hDp{wfrEXWTH0UXEon!U_XX7a4^SI4 zpiu7q#heemNp_###QGd-6;4kX(Zv0>UJ?(KO8#>l=NE93Y`cNa8cJQ}=C8;UuLVTz zD)uO|p8hjjZP!zhM^0wGH?pw-6HF7-!Po~1{ z3x-Hg0<#$Et<+YtE(XqR6Zl{8DS(7H{q3H*O!nZ}=6r~|iFOkPSvyKEm;vZQ$MF?s z*=Nkt;eUhUYQoyhpN0{>*PDf(geU&_d#36&7na{X*kOhSwToV=n zrv4S?j6lyHwvJcsW#K+#OtF!B&_JR)!CX7n^=SKyT{XcyBl)pfwZ~BANVGj}OJKYt zO6MKRj>+J)Ly0q0QSHkSv)g|zKE2IEBSUh-oa3z$))9DjC=2VXr~E=LKTA!{>8c;h z{~=TqCD{0iD`eHnElBvx`2n^;WoR0=0Pp^B;ICvGuiAJ|Qlp9~IJL^_8;0pz zk;R$h69Y)hSfhKZb1-}@CE#clvdTb-oCez7NDjeGT;=yw)Zz|JEC%pMY)AJN+mdbA zdSo49`=BIPm#%lttrJP=Trx^%@j~4cf246q)XOhKJa~wp#CBK`e?Zce=TYxlQ&bV| zjcU;PM2)cUmAWtAz8{>eTKQtz-u)b}GU$$zlWJnHaU9d~FMPcY|Kqp)c@zCnYD-sgT)p>@#AIvKVy% zG2rOe1e_SWR4neTJo5=bt2!q#xqr@2*Ud@t9=0a~NeFs&UXzb9Zy|$Bc*z=q}fgPc8EWF-?C-eoemh28Qs6oB`n{SNyfxVhpFY z{Fi7d=y>~Du7;)e@dkE2w67n4-=-HJwXr&e9SpHQHzRxZ2?(sx!b`<-fEuLl*b=%M zXR>tzQZ5*U7>}dgznV>QH&m98SMHNlKjlfS5_Y*siA=fg18LOmTQlCW z^$jX~mvkZh*H6AioC2QBr}{7mzC6=-&wz~DKJJ(l6#~VjR3|!<7i}-qx=3ULDMXuD zq7u#<)(hihef*}x`{eRhnl0o`#Z=yRY&GMWrpTFwvjI3CCFN-_r8E?~aQv)%9=iL= 
zG{@h)BN|yE|GYlCtYQ4;-_Nqyu6z1No@3Bl zE(IbEs&c)}H@abNF;5nztPR#;I-RgHj|8z}`|N{IKGluawBO>~;*sEj`sezU-X%FRiZ?*{B3fTbhNfd9_25oN7m?%2oq^LPY|aPsbM`OuZ%k^+Xz_5{y=cc z2>n6P6wMX1OM5YO8-2@4u2xb}<%ZoUoUobtbJ|hIe~($VQ6Ia3fnATSiw6kx_Hauf z%@ta^PVZGU%|3#m-Wra4q|_dv;T%IIvD)>5vat7KwVsfpbBn)aO>;D>p|=u%lx2EQ zNbyV?S?B3r>bHGK-gh8IjvrivilpK{Z`J~sHhZCPowSv0h_#xdpj(R6i}d_Da-11= zReRM>(ZFsKUv(D`Vu&}TIqxTM`H|W9yN3q3Aezt+#w^O_X71AD%hd3|KI4wDCq&<^ zW)T+UUOY%T5<;ZB2mC<$HPY|~$|$#}(J9^Gr<^OqdUpg-ebqiimdjf-57T~QSsRq$ zHEJVaKy6~^AtQ?ONL5~wE_^2h3f#Ngu{u`>l;Ik}GhcwCOrPr-cX$U+;V$VgY^+s$ z_*HJ9On$dw+wiBssV98$^!EE*;uK&&(ctQ!!5~X$fJJFdW7u21QL)t&%*P2OZz z$N<|pL2G?zKYO>OKo(K+L>q`x8}rmo>MT5)$EhE0aT4iY+N^a_3|&w~K*DghwQ-0dSA zI_ol7`3X?$(zL~%i^VSIct{IxDftSWvc;b$3}dg0qb(Jk|DDVkc7QxdhXaE~SExPr zh8LoO!xUm6Hp?8Zk)0NFlNOjg_sSo-(u$*NQ9wvcoUJ#g^<&(ibqvwAE(zm#{uL&S z9CESw^=nH%#9zpqZLX0ao@tKk{DPrsk`p35-Hq&Ym<~i!2sP!o12t-+ddT;J7n=-t z+a{LdNM1+W9g*oWu-K3$2>4ORk=SVtun<-=Eceg|o54AtV!VlkCkMG)WBo-V-L*Hz zfO9HtD{(_DA4IyCTYu-6a6$|xOm^uoeS}#!ufKadAyX-5j-T18d+BX-6dBneAyv>E&V_a=pngy&TJS16;dEMgx zCR@{+tgghJjF>inz`E?ii@MB1-%SxBBe9`-6S}`smn)z@*ktr(uCf z)XJ-0^QeTe+_6hQmKBMSPOsTFZiCCC*7ksjNcc&{4Oy5pF`4z@OSyR#fp(Nd%$35r zN5>Z>qmD7EEGI(WdO9a1qJPRoPna2fP^N#i%Bd+7NFL)O^wt{(F(BI7y5F7h#YrAbWDbRUtFuJ zQCyvv!B|k0|A&5Nmbzz5$O-e*@EkEqzp*EZr&fSSoAf>58+sbs3GWQ~Pt)1eegF@w z__Blczje3Jby(9BBq~ft8_zd2m@j0_5wKw*CrEQfg@-%}(~KDTwF5>ztL5%L(|{r9p-roq%^F?U%~EVESyWRSb(pwr;_V1z zHH!fv=i`^9gOk3?NWw9JX*MO^hvEyZPLk+4rVr5Vn|}A?fPiD^AXBn`UGkRwGu>sO z)-D<0Ua_56+xM2(yRBc-j1BFhG?ON6+{cJqksEJL!u1G0)&~vNU@rZD(94MObEr96 zgjT%);a~zb7I+IWisEaH7pydz4S`t!yG6i(Jwc}i8xXC_j)`NAWoKRY3$(KD-xe&! 
z&k%GzI|fG;OYbovV^8Y2{)O_W@_210; z^y=)z|1nyd|5a!W|1Y#e|GS`N{x7skk}`B53*}ZM9o&ikUy2eX&gcjK&A72Kp`7|3 zqqY5Cg_ivPLTmEB30hWmgZ~lhSp5GP>zFVI3IA_sN6^7*+Wu3ui{1Y#+W)+Am*AiI zKU#G`|1$-_@5?t86z;zmo^@aX#Jm$EE%l>24js5>Z9{&H6N5KCG{yD)$>gcG$ zCoMu2R}0cg=4Exuy)yhG)bd}1w8EGVXsN6H{^vh8{Rx$<3bqcpJoJuiVTLhR!dFHT z36nG<+Drq$C7Y?otuW`wke2(%l9wAqnAd`TAeuEV`;^0Y33xXMW3B~P5_t3*ImDLZ z9LzT=fB0`SHd~Tq0{>}lnv?zm)s4gd6OH6m=JsH zp}`PvdG;Q}>^k=zJQ+saY<7Kk`qW%p@3Z)JJp%x)V`BnV`@d`Ig5edXFc)Rs7RhU` zS}b7R+btMBz3#{Q9VaH}R;vecr)vRi0Y06#{gVUaIa)qPS(r2uFkkua7ZotQnM7an zAi7B*pPLQh>*K=d+FDJPi|1I+-LyuNksD%bTo1eHCK2jsO&05UvDhva(@Dslv+~3q zk0p6AsSSPL&1jC@-Xl5{q5aY-^5?C+7@;R$MFR#V41x22YAknMYWFT?ks@yy_vN!8 z%Ew0kv%A7tdQ;vc_g>jYYYc*bCGy+5;2WX+mez{$d0&O&`lG!m;~5>O!6o3UPHfQZ z2flbhyOqQDS!17T&dO-$bplQWNXNB9zN|s7_g+ps%CboEC1ak`g4Q&tR>&yYK^$u-)H;R-kZQrpog#dKc466bPnQKIo1L1hsqkU^bW6unpg;(sj70 z!Os(l0wq6kBu4A1aA{0jo@m&Fy(7~!$DKI6P9pQ!pLx>T) zJAW!z#1;6^vE|>0g{R?_+ioU$<{~nqUxn$bfez&wX^83GX~5gb!Zg)Z?>&r?+bm4i zCUir_Zk=+M&+HbZ$RvgI6Y2@Rd}UrwNS`}(97XwlRf1AyIqH}J;PuBfix;zYEqry6 z%^Bde)9`mj=bn?TNAGn$z2mx&YM$IzB(}>e8S~a6F??6`G_K90X)ED4|7gJVI>Gex z;r%Wn_Gf1@kgQ+U?qJs3M+#HC!gC+FZF<>+%||WOmWA@T6SlebqY+2Jr2y7D5o z=F@?g(`IaMK+G2U|g^B23+AUqAfI{~B)Q)O3cJAVay+^TnV)%@3 zZeigd@S#oE4jZ{`j8&A*xILMu+U=%)xn@tIQSz(&Ak_93Bnnqrx7?M)uHO5dREu5d zp);RqHC=g`7ncbVtplMBi%<4?D4q7a6H@Z6ihe6_aPF`bj{nesSG%cUH68nEq+=kN z@{8!MF?h@6k?TCTi;h>eP*Qe&*zCKP93lP8;ysQKqO8);)@QK+-i+Lwd*FO@7MET6 zWST%D$TZfnAo*Ma{jB{)usMCZXzYq@m}k4?xbop#t2+HdBHmOJwbI(jq<35k(e2FS zXILwlB4(`R#+x#r6UsVL{%BZ?W+bxhv=&pd$Dkn5(S~oLS`#x}a4$fY&Y;DdsQP)G zr#r?m3K(o#Uvn>!3|QHmsx6;Lj_ZHN5qTF|)`Ht!Xt}wMwP)F7`CM8=jIA2O#MvWO zV!?=2JjGF%zNUlz3sb6$M%YPr#>NdP@Q#IjE9cMhj$2LVh!i7_MyM`m$vD?trjKL$ zFcl3ha8SP!;WexOIEAN~N4INu+ej%YGjd_MRP)ZREjUWQdPtr%9_n#Ji{iM$p2Q<{ zNfGQ7Sl>^7G?HmtkI8$g_YAvAwZBZX9~WSI5;%;aOg?wS$gyu#4BKTEBf7cWBp_!j zedKdv9lf5IRP&A*eNZ6h3pAd7*3f0!QcKpPt4h{}F2*BXRhDgS&U}gaK8OSn$zy=B z*fxKCH~Y9&Z3dMz*WGf%#*!q)tBLBqB0)5FVMrTt{^*zw576|B>6B*qq|A81Lnj

HTWbXo=#s{mri=-TUkmOWvz-QDKTN zccMUu3!K-PpUQ4~Ae8nIJ-DUpA2OI+i=RBRv#}#0meN_g_qKDZwV1qG%>(}|z`I(_ z9A>Iu@6jAz#k>LOsX7Jq*AB^hy9GTvPb2J_yjSfkN;fzO9wU@65MLfWzES!PsN@%% z&fQ$G*4GZ!R#)Doohw-ZI#l(WH=}Fp)Z~w&h#T0R5?uA|?1CZ@N}o+xt|UN@WtHg! ztX9XE2v<2t(hBDb|TOBT(k{!QRd`|R3m$8z#x_)9jQsXop>5)HLmylPUzeG+}Ob`Cc( z;(EE-x*dZ}A>6RfFhq2|b#ov^*!CHf;&P954EviO-nNTR@hG!^>Lt2$XAHN@@pV4^ z`dii9umD*h`h#j%d_Vx_;$dz4DR09t#nN$RXy%D z$K<2pk`bnwvbupY`weqxO1n_&Gx(Z2?5H?BTj{CugPRrjYqGJY21|w9zM;qtdm>wC z#WM#gzI!@M%1isGMWl7W$rvvcaHYjKLxUG_=U>&sY)$fE=S0Mt$b$nB`!=Cp;n;v9EG5rc zXwz)%_4l7LYI5#}4}dCKTq`1(Q9=;#K9JRwonw(GW_vnZ0q1KaW7Hvw8Tt-l!&I=T`UpOQ7zO}RvMF!tSmsXGLKD?t(S zh2CmW;^ZQmhmnm={N?-&PU_h#sdq4|yLmi&#(Y99p?mVs3(*I*3)?#n_svWQE=NYE zpb<;Qgn3<)jjIkC4-+ex7cd7c7&wyuty#DHcMg0o76XXA>f@U%ZEXMvAK=~*$`^t zW5$tKLYoVUfN+{%+=E6mQEBYUkNP%EXCpTaCCbLDPHJZZCOND$_+D^S2yKBug$Zcd zGQ;k!in9(3Z>$8NOz!Ffp`~v`CZz!l6LfronD#+%A15OrDn$F|`R>7$9rK9Y=?7t@ z`hS>4&y1gFZ0yB{XRpfEi@U>8Pe3tU*`Wwgc!c_~e{J{>F}}L|1l!wK6G~?PGCtR) z>u8bk@6*p;=$4frV8CpxWC0xNA-Ik$FED1pkyZyM{nyAtasytxVA&eK#-;-_C2943 zu0;4%;VxF?mfzYG2y8p_kV_>!{tnEITD8iEM)a^W7a6Oi+ZG+Xzq7R3FI~K60T_XHPccwdOCo z&R4@#yz@U$IN40w7#Z&QN7A);{n`ggOuD6VM1m>zAY}9R;wQMGarx9f59^o2RpPYT zs%$?aWa!d^23}Am2ge)(u^(K(`1K~`)j}N!9IHl|7=wP-77Pa>xbyc&H4Vz_|3d%T zejJef8x4NiWp@bvXCJ3kmG#b|TK#<^D9HZUM7iqHW^W+jl|*ZtGcmHoTHV^JnU0-rb-ebFF~l!CcgX-H1O3--pQD^nj)xuRBTtRg*3=>|JJR<)$^!5w?N`I_Lgj)(ijMt)S}G#CybL z>;d8R<;e5kC%i+*KMinu){z&)@S9awmr*ZtxK#IB{&`zm{T0~b7$}i( z*pJPBv8P20c4QGPyCF#2fQ8e0LOR0-F)f)4aJ+D3@|NN}CdxHj;{=rb+#wrQjJwKqhK*q; zo`w$Gv>X|a3!Hf{sTQHHXJDsY&DekG&?;tm0r#*s?MBFLijcHbJ6Wu`PnEdUc~#86?TFR(<+?p`NidOVjeo)$sRCsU=VVGG(ziF z%uItto7=aru3SJE!O|_0bUzQ`0-;@4L8F2>8qCeG8{|Ip6~?}WtOVBVoRdrZyfA!a zgYdN*Lo6tAzCWJdfBE5#q-fmy^x$l1LU3a;t~jKB_6b9ew)W1;fH~YNT=ls_==#ZE zkX-teSIm5Y%AhRP=tgu-OHEBa0QPrHd*8jPBVvH>PK`W38^(OihZQBl7b5d}XyyL5 z0DkB@Br6IU6^Q6OJA2H1t)Xr`?7D9;f5HsxfPd0zOcW!G$ld%H(HD*FWIBT0+Iko^ z;cEgcGfL@%)ZIE7&`GWZjoHKO>VaeeJW*VA>gq$L0%~|G^N=S{$t*w4rf7upIf#|| 
z&$FMCAZ@`W)oHow(TzUYi?hMbeu>K7#p6f}NQ>E_UuV7-jF{8IQu|WN5=#tG!u$x) zwyhAt4erf3v|^Xe5k7+F$JhZeae=WQxe7WTCJgl^yXtv-nAvSuJp^XSDUhzIA%FnZ z@7O@5FNeZO`TZQq6xXJsdqHNwrx~>M&A>CXecvSl5 zweOwspZha`cFb-c&K2};e&!@_7SN2p`esOK40KWaN(dHu41wZL22bJPD}=CKk_rjr zniqzdF{I3zKS#wBp83|S)8kxC1ke~}@!Y3BL199S5(T%#!xkXET+$KN4E+~@)5@l^ zo01~T_%zoMOKd9C>u5|yx{gs{tJM9`T7!vT$?P41P74DF`MU-p)^=z!ly2OnmP~iSu4B}0*pfYL8HMA` zt~s6w#0>%5G7M*O?z~}WBa5M0T}8+lHhV?fBl$wx;q?;$%&?P8>f0j>@#2;viOxEl zmejc+qVOmf9-d}?xHRIFkAKbK#K>nMRJ=BNanoyP8=JYO4_h7PT@zqRRb<+r)#217$|5maz8cT|CC?pV_z9~$GW?u&@TLy);aUVdG%*i`rg zrNqU8>v460G9rk(BUZv=*t!qDkG{v~q_9j}xTZJf{-7#DAFiy+B#GWbhr4 z?5J|~ET*Gn%3sVyV0j`C7+IrYDWgQpf7Xtsp!Qq@&V1`iie(2}h}`kZ85-p6jLE1{ z{HevvUO7JUElXbwGlBNrvFry%fz=apO~K?>1;XU(0t(U990n`jwG2#Qp;tp@pJDkhuUQfUjI;I>ufZc=5~TyreT z_5`&6&{$cNSw0n?<*y*cKGB32KN6Vzi>O}(5zMYtr`LQ3?2__q5CvpdFgq5Ss|rtj z-g!n-QvX_^p04#t%cTe+dCZlC{mbBdEseyc3BezN4R1A$#sWkShEb?>p2@4xh>Ry0 za8x$I&mST*vuvj#AOs9YP5#yd*5xzPYVaaIUDY%XcuKJnfD#F8n&aLEJ;!_IO@A8! zk1G$3Fk}{`8TiaC#u70ZXOpMKGetW=rn-TN$(>k@T&xFW;uqAsnA6TL-(a5z2Ho^f z4I1!j1|*fC-Cgg3PnM3pjSP~Ae*K7&{rhp#{c%f1-P6oJc4aui z`m`Me(0Rw-bzW^Jgsglfmr~|eeofvn!7Bw&796g%*1UyprAps8`2LRJdK+vpF8hPF zyN9T15yeiUaS$$!o zOhDmVFCJ7CS4;~G*arB=Xj+C zZzvqt_`mO?;M~tksW15Wb93U$%)r@S+lJ|u^#EE1?DAVYkUyzn@{e)?W_#F**zz*o zQJtB$#m*#=`?~}8z`K;0!HwG!mK$By)RM%mo!-xJXRNv~`~u3BQ${nkc696`#gG61 z{6Ug#G>{qbG?IUBytvK)wlGph+sXjIpe06e!@dhGMUQr=O2K>x+rb^_-lK_Q>6*8S=-nT= z4f+Xb7s`&%^tbIBuK7BoG{Vz*^p{^}k1E;>G-sM=2v4+REIr&k+EA?@U9QtF+amq$ zJ*EszKKcuxkyG5X1tE|`vUK(Kz~ z%`O~Vxc=5X5Bn6dS^2fjB0{a8CS=AUE_`uE&#O<>=PZ3a(OHLEnma}FT_D{xJ#pujG}lG zPb!Brw-2Rr++Zdc+NJ`eTGc>bdu9SGP02j3j{YLzr|hl++kJ%QUP_c&mur7#LwrMF z{}?z`%ffh~<7j8&n?LGc3FTOXYtia&doK^Zd5P06GfIzTC2dT#3|yD_=gILCzMx!& z@a|k+^gFmna#-0jkJ{U&6&T$e$-RFQRTXFoQiV|RhQvKBPpl

bBM!if|#xyoLf`$37sQ3WGq6!K+h)jE?~&P3jR1rL5tmd?=x}-@-i@awUr-Jt?cER#uOAWag0^g)1++|u zQ#`Gw@58J+tB2{*q^aCeR>mnriKp?Pj_N=rm(JTmc z8)<%#+Hjn`$RPXZiV^Pk-@76nEacP|q3@c=+(&%1M0IPwd$H6B2%Bni>SlN|t#S$4Akw9Mc|hjkI&3&8)^BvI*fA336#3DATKWeuN;i|N z8r)Q`Hzl767y@6xiH+w2Jxz^rK{m#@H!*_SCE(s3d#AfHjU;tC$Ay3dvQi`um(X3X zoH3Dp4^Vm)CgYkFa*Fx30h%HrURg}ZA02Y) zjz7uR5{~b&sxzK9D{lTm+iu(P@XmRiwm>lcNkB8!6}Agu}6li`UwTpF@so8D*oxl4HzG1IHJFyv&h z`<0d;IpC|Opf%*R%2UOfbBvL~CzR-TJz}9f-l}E|h5t>&3cbh?_mBY4?>4VBp`{`# z0q_*lp(7U{g6&u94++p4eooqP&uW6K+)rSkC>CsI_5C*(^@Hi$aAp!5*favOX2JPt zjl9_D$L<3l7D`Ew{LE!FU+=KoZp12;9K{&6}2qKN+<~ ziH*q9v;FaOfxmPd;(q>sUVEM&uRF+N&IFm}=B$z zSLsVUg6g9+yA1M~v3eWL^aHkKb3Y`62op-($DFb_@dAP7i)WL%BeKHunQ|{1I11|C zkH>)r{JM@aL^IysT}V@1af-OyEQ;9pc~(q^Li$d@l&NPYJ9Y>oAgpPvnztKvnW+O~ zK*QEG{7IG>6Q6TWt7z3VdudsI97kk{EKQGjH5>C9Zt#-cwTft!Gz{VT9z@A1oCXyPRWpcvZikpOoM&=Nz@u$^g<|mdC2gI zdJ%td#*S`=;EJFnD&m%FCY(EBQa*s$o(86sR6(c$s@ObTrF9FN6M?4JTeZX7?pdn{ zD67;Sb9;}{!HE_9&ASR7V#)?%@UnlePA%N9H)#%x%V_3JGoL0KsJQ|{ zVo%v-Dv=doH7nehz{|*`xxA3Ac$cY7XUUb(SJy|7`gYw1QcCf*bXL!xD>(p}Fz22j zk)D<#?Ax;~Fl(oHt+H%WJm~LHpywEC)_N5-&Mlu(@O|XM4|b zomkoA3yQN+_R3C6oo};@0!j~!{C0p7&n++F^ozmP z1*6VF=HII%hX8ZVt}?F_n@hu&vVhVXiD878%JysW@`04l{QUcrm*$?^qz6%^>2GO( zD`EhjPy1;5GP`I}19dJ$?Kbz?nCo)&ZgLls?uvQI+OyK|sJ6BH0SVDQyLnH9L$5{Z zhGo6s<#s`V9pbcq0W3=q?{hOIfwrT}?R_cFM5!G011I`(Medil`piy7{3E;Hm-+Dk zm}X6(Q@*m)leE%%xxrgqnzrcl#DdJTS#uVkO#Siuwii*|M-X|I=G1&%Ad4?a(r>S7 z1__)%tgfC)!qTs8+epda56CK8;C*QV&za8(?A^jR(x6cNIfgd3h_lelL7E8%7S%n7bwiK9-{^or zHmwE@bYEru?*91kYOuQg>y@M8T)76hL_yh z{B0H^<_GGBql#AHY2{I>`+t7dKqZTX9@ngckv#l767+65($flNU0nUJ4F`$b*XoyE zGJib@-#rrxCo7PcUvh&_8!}L@qe)m~X!XV97lP`;A%=OfY~8Ixc9g2gjs6vx>|h_e&A$;&}OC=x&bEa!$cxRO{0I1=_RDYYag3i5o@kX9ShMmEv>;nI@=!r=Ybgxzm% zu`oNb9m;-Q%@L4Gi=>RJKqcwi^BPNl6>I$7fl7MvPW$(L3!z0>3%*=L)xv;%X4lwgr~JoZ^5N%Iq5C}hC3GpZ z00wr>yJ+*|Gz&F);P2B-L5SP}IT3VBNZRUipJ34#1p2JxLoX1y=-Cj-6pR!`_0&y17RmhKe}FmX}PI68YJXjoJnGPiy;%Di%}-{s`WHrIMXn4 z0h4nC`y45e*g?6SX#s(YZt3wDefkqLLWv+RTRHQc=5UAb#iVl;_N9hY0Vq}Oth}F0 
zCzN`bAoxgxXOe%*7Ay3escswK>PcYJCo>tkeX~jEO1G5TPPWzRULD}pNO>OePYEzc zTqiiwKrHlM{rW$;-Xm4`nVajY(5>gK@-n+qRYJ`zOtW);^Lv#JKq;-%{G8}qps<=I zd#VE~odUn`ANxb&9%qW4kkXBUAVJ^tYlZZ3kcO68ak4$mb6gnW0{hOqDa(F06+bk5J?K*Pogtc~w`Xf9+DYvh%#8yQr?mMA(=lBqxAlV7_ ziIF?(QO}l1hU2iCJ8#Bg+e*NN*xlZP}g->rSk z`YBc;Cpol^S{Gb6&A`2Sc`=m{b4;DLKuQoUJ2*25tJ-j-g_sYSn=sjpl@DInTYNm7 zr`-QW&_NIW=k#a?Mdz$R={JFYhs%FAJA=2ZFRl{2IW_pbtoE=sm%COVuVGGPg%6`| zI4*IgCrPf-JmX1@+=$JqG#|P$hTI=-(gNcv z5MpcA)kWq}cOnp@$yhusd|<%oV5mLa=bQ}Hy7%yLT;Sqda(}j)-ePI7{+zujDDH5N z+d!;Q-FIMiZye9kb@|)6yrF&#vo|0-r zDWwxDn7Nl#ogrE*AXS<==$DnY`@6Uc0>1&PYlCHIaqC&qmp70F>vjsdRks>vb;Ec) zMOAYf#DxgR+95wA@GG=>g*vqMQHnN1rL-p3Kk@CobR4!|e9 zj2{6DbSuI3uyQXNzi>^nCr<*JOjHdT<|`Lg94Q7)x-c~ovrp|VZOD5N)s9PK&rgs~ z1~g@tQ3Iz!xJ|#&s_g7I9p3$HJgcy`u=RE6Vd|}6Uf+rcN7=(nn1KSDOva&Mu|mP9 zZ&pT%_&)$;K$^dg%dZYHv7KV9)#P>)h5S28CrIOlt@g^|fY0t3ER}nN;qxvt_w66f zLg>pfFL%8mF0D}T{?ip(E~TUcn246?r%U0RJz5o~_8Q-xFr1-CId|m!QY_L;tMV6v z4rTN(d!b=$5AC!>x1FPFUjFHdd5`q7MO6sXTuL}Jc&nXd7--I$=m`k(*JU$1QIX$5 z*YDiJ#LAZW)vjjxHk0=%T;AWya8uQ>0aaNVEi^t!hB|_hn$}D!r{ON6xw(VajU(3j zH7WRsVot&J7O)~}g%LN%gzRJkloGZF ziaE>;z7KHvemRxryhTY;b3i|oPnYco-A}1V;F=tE4P!;4rpaKIo7X#}i6{7;m%P-0_ zp*gr?cma2_5v5&pp60dDFnpVhBp9A91tN-IFReD3QK-(;>YYFGp#%=4{3G=7=b3#O z7;U450RfbH3q$mxGBMT<&LhasG=^Kb$rf!WM~xg-_-GJsWCIv)<{nx(4|$?eof>pN z8rgQrAE?VcjE08;lP}t2kNOo2hi+kg3AU2Y8{RrYNqS9U)*Mr*FuS}!&Fmt3?W`{ z0k@$1a2>Z`>H!p59(;i&uYL#1eY#PP=;?x)WWWwlZ4BGp zpul#*C5n?2vUrW+?eUc@Cn;#H&6*pWr1;?iyfL_}oVA3?Z0F$&3jRFJ?I*b#ZmYDz z@Otv;U|7>9%hY>GZxcZ=U$0uQA{gg>2t#0Rt2E$a%B^eY4_)(;wOSpf0v9AC&3}uB z98uhCXHIGt<{nL&08ycMs4eiyVBfak2VG#L>j7QI)Qsy6VL( zDk`qggby)db`4-NI9_AR#Gr)%fQJtO97T4$4RpO(wSTYFGnzFIZ&pQ#Z1$r@14Gm= zTzjIIc&f1ro-_O*9&5WA{6|APpcqpXKmL$%l;I|Ln;MX6eH@Ep(t%y71JRSSTCtY)Z1Z6ovpjv$cu)mUGz; zNy;rVw&}wuV9<@wCcA+Dcj5XrI3~RgeX#^g8c(gcL78-<;Kt83z#@mM##igZdGvrH z_t9uAiWjqrn=Mevq^nkZCtRGBP+muO4MlMF$X#;CdF!$o3EYM*FvD+vw^^?l$tI+u zT*~-3tt&jkFtT1~gcmX9ay#pVEvU zmSm#oL6U82=KVE4KDj2OIf04K^_Wxo{CGPQ_pdY2)os8xjH$`sA&HZl#uo(Mbr#gX 
z_DxQCGWm8jRD2+W1DE1iZcb4BbODH9H-Pf)u5pvy&a*FD!jYC&Sa1=g1t{BFt2w7C zB9dkRF%HX>)_B1i`_1#Xb+RH)H@FNuCa$+B^*cQokH5EuH!#kr`suz9l3Q`~eToTv zlcrEG@^c`hXwWo{t2MLFj<550n~7Tn-Gr8$P?*~8X7+$--qO{KPEcqXx|INZ?^cYz zL04~tNPh>H2zTJd2d&|pVBjLq{v3!+`hd|$zhTIqWBA@fDPYw`_;p2=^IK1G=aq^9 z6*Mr2u?)6&R)_16mIa(8eqJa{?#`B=(LOY z7i!J9R}r5T#sgkEFXmC?6hk+?&oJjtkej-Nd(b0Y5quOo_1)Ysd{s<%Gf(8>iIoeed098ehEHuX>Rb>Ea{fcR)Jw+Mj zWir&9Cb-Rwv~%-zgWEi8+*2K8H1e6=13rl+i@fbTn5tB?N)`In_}L z6fOOy71Vw~)4@1=B+DsPo6?Mf4xa@q83JO2`AVGLna8%F$+LEz14c%3TIFB7!)X3{ zyoNwde4PSKD_A3kWhV_t1h;}sp%sNm!DpZrtnZx1MZwLt81UGUUxk^yM``5W;*H?# zauNvx1J}MssYmxHV|iXK8q7kPR69|rC-*8C|3iu`rzp(+yV{86Fgzw8ZjLrm#CIzM zoz{?2t}BawJ|5twT@a%E$>dW8Hk5h{r@qWdnold-Rx|5qE-!$_OYT#|1v=DzNsBa- z1M&o!q?DQ-_QShf4YUm?o9_m-tig@p?)>PwY{u?^a23OVJglc%55bBDlMgJ1k<<$D?~Zb z`D%>bKWpZQA}bW(I7OlLHZ*}nu55m|Ny`=81Xe1d$b2+ghwnH!%uiK@qTp(alyobd zpo*}nqvI3Gy23wRc8KZ*Bg#hFt|8|=h~QXp8l zmXWd*cH;1)LO96}L9rxXGbsp0=tOQcfpQnwY;ZrzFYA?>9ll|PH~_PC)Lcq*nYgwD5v+FeQnY^WpO)&GA1bp z0!2YZ`+V*e-xJLk`hTs)X?2eMg062P8P5e*9CYt|jnWr|ZVOil0zaCI6M_!Jl!V-P;MI*OuQC<>Iv`oOVL7 zV)+`Sh8{&`{-^U{Kb;Bn?fzg##jiNnbO&NsQ8_M*neGEp!G-2xQrPY=1P&R+njqnx`xjFIq zTpVJ$A^IYsC=B}9i&f3o7DgeWmD>|*q`!@g-6&s1i}r3%#b0bUJlyo7OK?I2(awIa zn3HPlC|gENxv1);mS!xVE6_yOHx$aVcO%>KBbtiXPEs1PVd9GM7qxQT%U}SDaHQ(T zG|zUvgI4^8QpP~&2_lGN{2U~prTW2RRoZU>E`zj)?>`F+Oloj)T;?sIfe#d3@)4VJGw%%p^XMY}aIv90Fn~HM>M%jGEMidR6!#NPPE4#}C zLH1c)o6qFubm2V5#b30pe6&1dPDM~2HF%e=5mfPc7_^lx6TepZ+vO?eLx6z`;)mUp zHO$(*BG*({E8ocwdkemJKwHh^ZaC$~K=+|O^YogZ?~CDZMFl)$ey_*RzX7 zF7~jZEb#oiL%lZJ@lvUoORIE*Eq?%63{O}19V)HeKLy4fip(qa)lAUa%mG2migUAK zu6C)^Dt>E<1x?b7c%fEu0T+XV&|$KGcU1t{53y(1>a}2%BJ`?v2@|(IzE-ur{Y*8;h ztJX-jv76ENb-9UgxxC861)h0U&5yo>r}`WQ?{WS7X-|8P$(O3ad0QtZkY7BJuKMLq zj`*_HmHt&jjn%8kT{xG+{oKB{pB=B#QGJ>(CRkn3<<1goe4~oEv(PFbYVGXB6Z(ed z198ef(=K?k?~w7*A@kNwTRb5i1`Gjzt|nfZ5FfmLymJ298H=lX9{-lIDPiw{w;^hSvz~t$MNt(g44Z!2y09%2HHCbXbvI4hSkxm*?|Y67yYm<%D1}d6|(t z64NdO@uqThUai~`LCoMod9u0;OVwKGYT6?KII$7(G@Z6E@Re$;j06eY22$$dvSP5< 
z1Q#w=n$ZjlhWV6Dj@#z=r;*I$?I4>8|6>zlgAv4f3bfZI$Y_#mV%w)VYA)tOL9vF( z`&95%qS&u+ntp2&q1l7r1MxDS5=Sr7u1M(CZPZ@z5+9;GE9p{>^-VCcH)+iTddAh{ z8rq`eM$HSCQrh|=Wo{VrnXXom2Wrd?l_o7#Odg@!n0wWcyb~&o;1L{@@)@gH#d(p7 zyHpDRwiRWCuUE(eZsb4E{ctM?{|3tOf~23JG?RxE`2y{^(*stTZ8VBA$Y3tv05HmR zdJ&AH>wfYux&696nBS$k#nx&Q6|z?Y^`O-*=8fWF7|}Njn`UO2y#`=#fJRRTSMqq+ zx4YG5w28?-K|f`WDqe&OP*VG!z#`~oalC4Mit;)Py++d^UiuiG>ug2-Sr?;ubQP@Y z?=3frly=&AjCgWyT^vzqL^o(Yo=rI4hBjKw%sq8K2w`3`6QlA3)v~MYAbk(wbQ7(u zdsJy=7=riW?I#0bAx@3-Wprs2QKtF_y13M6MEAf#zYGiYC^XZ))dqUm&z=Hptf7$O z->Qwo(&56sN5RTRU{>!@8~HL`%9FfW=XR}X{kZC92BooH4;!|v)rfa6{p?++_znTf zYSt+Jv)asbU4Ec*=Stm+Q57Dq&dM@Ad)T7QR;C7>9ZfI2N436GUQ0ML07K`S)(dtM zMftSJb0ty&Y+s(fziR? z6*Qv-g?Fr@I-V~ zy`9EacGaBazyf z_bATbRJ~vZZ$$f6{UU(fTH!KzYXyaS$!ln6;&U*Wmoq$W=Pd){B~t8rR2OnK9CU82 z$d@>iZyHR@tyQkp<*^WV59ZyeTEDmabUI`^MyrQjr4cSt{M3Ya-CA+9E@KA0;wlI# zZQF2qvynl)6ocTw>`lW<4$#WMfC}5$$i$vqjr3Hjod#zpHD@ix=SnzPXZK(`Ts5X~KX`AzkzYca?x{EMw(U6Fg{Id3S{15p(cGh-hDn`Z~0OaUC%RHgCtgz)%4JMB0HxH_m zPjfIZBdO@7(glTf2p%AWY&9xY%W0>@2f*Qds>~ka=JlFP8;(2^%B!!>9Bz&US+I4l zEpP5ek8h)zPeVOt4ZQ7%5c;e13ax>^N+-P_4V^jHYx)-_z{aiWDsut6qS@a_?zLoj zidz|suGGyB^kxP``HsO`F@$M8^x1asM0zzLubsdK(*reYks>dxVW>h$FX?V(Xwe-o zwlBpi?^SOEGkG)L!IY80(0H60>d8ibP&CNe(D)FJ1ZT?v_snmrczL3j-!lC?hZ5e; zC%GXp34bDiy+4tIXR1k{;9 zY&3VpjPdOaW|R>ks_eaL{o2`=&~Ix|+H-jR3ObMQ=@VeWZwEg_aV9E_W7nz~1V!^! z?Es1zerVOSvhsgqME!pu(*N0dvxoh?EB=4Y-KGCTq#(?p_1KQA3c?|#86uPzEBLI{ ztU6i%6v&ECN$^iQ>JYzxmAr5hASp)IfKCXJrt?usq#7wY^h22|wdNn-6$5e_@p9gd zFud(STFaKLVPq-GRj84|Tj49*1Kc!F=O-|yQ?qyttsWbS!D@b5LegW3mx7wY7~V7N z{L7S~6e%NFwhW5wF~yH2t6;15#xuHziG~8$Lb;3sAQs_V5YN1EE^7r4E8IOi3*(>Q z!hx*MXO&G9rJC8$a1L|Mgk8E@ZQyCpAk%rd2b*MQaFRhswMNJV2jZ>-&)PokEWs zX9J9s+J!KDrz?Ko6EXUrwpAG2(`bKDz`{gI(D|`uLejzz6|Hok>4h(HQ>~)nArrQ8 zquy;itn`Pk*GL7m0`yY)qG8^tj)h_IRjnppG}!iG@-jD8(LOp4nKB#~FZ*TgCRbEZ zH%=<81cGpk<)&xRjyx|wK~BQmGqq|(ihLDjJh`sKVz<~s9}A;+DU8MjW=}Vo!9`Ho zzfmay;jB}Z0 zCt7G1?xzaWYe$(9bgsvxVhg$_2P7VEx#8C8a8P`;BG)qR^lBc=aw9vzY8UYpuX;4W3T6*qWX!BwYNaw!K? 
z!(7*(^%BVQ6?(b2-oC_e(-%41iZdzQbuxNtFWtPVIu>?<$*S!Gry*3+&)8T#NeFp4D!}7 zY@&Ir0!jXelGm@1;(Xh@K^qRk&)4A7aCu0D**lnxAj0_l0m=VTE_SpV`BL8O;H+a+ z*2=(t7~kgm=(5h+H9<}rv!61X&>-?I;v?w4_(xSYKG(z%pEiV#CSmq+PEc#ztIC;> z|ChO!o@<&9ieWm0l$^5~Ljq&y^EGhcw&<$mw0Xs3)p`_KC@59j*HsUq>O$2G2RV!y z`T9rTiC5Ijbpoo&sS1$$Z_ybSbL);?zu1+7H_uVLd{<6el3Ia~3GExTZ`FBRF_zfv zpv1dJX_5aL;L~jt2-k8O8gPL%m;gF2GyM3s5Y)Z8n3!psA5FmTagXgMKs@_bnwu@c z@Q4?Y3?#z-y=5NQKODw*N78!%AmBP_elTGyjDtB~_un|oHlTulrg4Xo7 zb>yyKs@!waKH7xw6})={4++p~y=gSFXH6OnauoKxzprwj^s{TagtOH4qR$u>oc62^ zMfI4%?avq$IB9iFpp(SLnz_GhhuNB%<9widfOt3hlra|O9hWxufRcD$FE>H@Hjrsw z_zL;^dLy{Y@pBp&`2&`hY@;?IBJv4bjs=9V8;C*b*hXNkH77g&}h;L0hz z=I^ZXM|lp~1O66Fh_#g}536u_mCo!>8I4?r3eOi}>`sI{U&O87_QKuyvaHDm6={7` zZ5FS(@~0SZbI@zAPqc1R{i3A`+5^sW8p-|@`OAtU-_x0RxX0RFRQB(a?~$W_5f?#^ zwXc->_c_u8>G-OZHk!qBL!Pd1=~9~8^ls~HQyze!mmaO!3l+2Q#=;_7r_`iY<@QfC z^FU~l*IjhZ2HneEFdFgSHL}n`{@P*UvjcGT{PfDI-AbVkUv;g0;9wr7+Q4Qee(jih z*3sC<5wO3&2eUgZ^EkM}OQC+R>to_BOFvEpScO@%u=+_>)Z%LN#zQLzi4L7d)D#~w zpNOjOkcU>4oHVbD8XYok)%c$^(=I7X1Xc&HUpr$#{}%dfL1oME{57+Q#ONycGkU_{|FNn0zwZ!L{FAG~Eao%+krXu;(?;d#9Ki5gHY@2+5;`rD z7|p>(BOxehN&#nRCMw^Y()+>B!3X9@(zN$4bZ2e+6d1aR~u^A9~7i8uD4Khd308|v0@Sy~dC^wU( z-V@H>^%_OKC<~vQ%N7Q`mp!TsCE;>-e6P@Y=U-}qyz=>MEb5eDwd6{icc3;oz+Ku= zu$*>v+6;TVH|#OdXz&3%zx-^44de~Y%`Y`jb&~2hkY%{#;4Tf-4m?m|lkPO&^-zqy(8W`Aj(gR$sdwFWQ(HE=jDC{ls;W>HtE*b?j_gTIdgs|FYDML}`Se2p)ttdnr zPf{&9BA7v$ba%qz-Ct|OFXM@Rpab_6ahwVQZlwEbey0h0LNs1Xwi*SbDg3o#`{`XO ztL=^^+y)wUJJ^q4vrf1WDM2y@5pHFjU^P3FOZaKQ(uvm$6V2CIAOsOs@CsBg z9Fg>H3dY>6mQllGxeZZxrcw%UMiu8#uJf@f{r)K2NqE(R z5Ex@m3h6yS8wS=GHBJQ&kj*wIJSJCmff@0X(f(Q!>_OPnLQ{C*6?y}Y;AeOYKYT=? 
zkf%lE*9{^c$j;@=T+S_YA+)=EUoW%Un;{&VyiqAHlWwFvjlgJr2GNj*K|!CPSbwhi z`N0(-|5IX=K5jNLG-ysC0yLk%LqJhTiU1V9wZseH^*3sMzO)qJXj*9ZOH%wIEb zKi*1*ExS)Mx9EPph2oLNs$#2Nb5p$?mDhV1#&$KUyhQSZ%RO%5>J7Y!kJqyuh+)0b zgzAy~4n`*EC-r} z6=}^hNEZEg0tjTSZYp}Sn5arcXYvN{(-U?heFRK$2Zq&cy-eIM%$MADL@JZ^C$hDw zb!C-)hJ?JQ4;Gtr^KrW!r|_a^*7+*gNS?#My-k(RL1Lg>#CAOv?6OiV9h?4MX764# zI}Kd&cHH7vN@j!1ykz)^3QHKSt$I)djbI(TR1V8Osfl-a5ly>)#gD3ewv23=9&Dhl z8Fg3WQ7R|nnYM?SO+`JsIMZ=LTiMkm_A_s zt_M#MLcQzK@PP1LxbB8-}pGIJf;j&{6gdQ zDZ>0pzaMw?qva!Po9KA7j?X<;}Ep+4sAIC#D@`8{14k6g8#zX7Z?r?SqVl(KF^@MB`zM=@%)erq| zJ0k@Db0y!uFXa1|nY}prA1nFtl9vcV;5!iNkZJ~9t!6t0oetp@_-UvE{Vy*Eb_Ln) zl$4Ds3TchyeAmsaTC&ne2yJg}V(kuWuPx?4oo}S**D6|Jy^$sfan?wZu7Z}Oshwao zC1ZWE3FuHlvoGNohkL|n^k8re6~*{Ipz;OI*{w?TA!Yjv&t*Q3vt!yO3Qs$T|6AnP z0(l74(}^FO3kD}&!=TAwEPY@Hvme2&f|p6x3a3xA zk5(IHlnTXC5zpWj+zJXPm9uU_*QX@edu&!5^4a@q6fC8Xh@#w>Oo($9? z5PpyLc0b?r(iKJnnBaE?G$Cy`I*&t%?QE9iR&)}K=fp~*uywz)nUU)Xs&w>n&S`=s z{A?oG1f`K4uC+VytHi#y;xS(3%@lt1E~b3ZByU};GOYX5W~W(ljsv&5XlmAPp@5ro zmt|b+S8b%%Rh!X%RrxpCxf9r1JO;h}4S012x>~?s3jaaZ>z@*$6K%o8iT_JQH@~VP!B|%8kNo z=DXVEazHo-gQ-$8Uf^SK9!H`^*0%OLtC7BLluJ@u34k%Zkh}5Glq_{L`FB+&?odf@ zqF5RS@v==pUO(T3*BoEueV2AtAoi`o>kJw~;caI>p|pHDYo z8cwHaCdG~SBWa&bbA!{3!Qkhbak@F2bbe5mU!`^3f$2PM8}q4OXVC05@HxJmSuBh*eD>7KNn!ItSytHc70TYfOD(5tsr;!LgAe6~d(asR zahK;g6K%!K&cSU1m8Pj5q9B{$hKD+PB>~*%` ze}fhp_8^*8o2pok+3l=&tyUwmB<>T|_v(JW*cE=-Zge$b4uHN~FJmM$?5!OYNbn+u+lgue)hh$YLcAIEIStSwKmuI(P;yl+v|J~YjTG`7mW_!Vz z+12pF12kJM8(`uI+dQJ#PN-bkofCy|N)O6!0RTS_b{QLJLX50MCfvMIN!q*c%0}iV zpX;j0UJ#ZgD|4BoM7Lm!JBRify=ZfCH{~VdWBsyX?bz*yj{{q$;dYft->sUucg9 zre>c;SVYXzH@uEgo#NrSCHe+mR_>u0%qzoKez1WFHo~j4Xk`Pj z56Fumw;KpxD8pt69}YS&0B~z)8>;t8;if_7$4g*zvlsY4XBR#cgRmOOM(Rv1&nQ2T zSEdb>;%N^C8yVTTp(LP8nT>#<^%SyM#fA#{@C^Q==FqCXVlJybUv4UnOLvsP;Cb%m ztLgKBR6{90ps7aQR{W$Ss5{ibZ2I_61!~nOJP&$n0_fz=XuICkY$Q$L%;4UQ7})iY z!dubh;Jopu9ATIbwZO!&s(EI4QM(nvEC##lLvaFcpp1sV&I?swl z5u@^Gno~*?tQhX%r!x&38qG^~ftU6We3wbIyUJ2d%Bz`B>z=8|nFbq8Xjh74xzR}1 
zLV;7TD5MS1A5kpawk$*C4W^GM#+l>gRt9%5*rGS`*lefx!5)p`ncPb+F`8L~M_tax zGjD?Byoq*(lTXtn&LfIP@q~nyV+LfXVPgq#^RsZyS(GNVO*u2v2=!_+yT_tUy2;fb zXO_Y;r~=;_Rdj2moh26=HCQ-iPvh3lH9w=+VKT+?0;ro4EI+&3veA=bF0;@n8EHUV zkBM%r5%?1%}7(-q;-;M*Kdq~ulaj)ow zUkxoe$)e-^{7#G64=WftezApCm|YKP?5z-!6-3U#6Sfz^v0O&m)`XPR2iXFlx#7jM z(G4-=ce@~1fX4hPy{xev7|_^s^HX~EN;?ssz1#piP-4{Xc*9{~-lGnuG}(uD^n)SX z|F>2JnBZ1y5};Z|Heo&FO_Xo{u;P5%pe-Bgl~QTj=ynX)kTwq7qgw8%;$|~z?KF&R z;Q%5HY2Md^VmB>y=&g9gRHYoAn`wu{`OMj@8-MC*r_paz+m98U+1;A?bqf`b;`~Ln zSjsKvo9KIWc?@jG3{IkCQTX|6la_|VEuwFYFp6hT=qCyl@e*GBcxx<;lDboqcT*sT z_6?;`6HyQ=KSmv-jV!NshSLBr`!kJE*N8W|8sQq<56GV@ARN&nCIu9zET_YxsrH-4 zqDbZ_GPyJ)B{tpME65wQ;c!l-GTBDjAmbj({6J=DP;4vU&2hY^{R_=JtamI8J68e& ze%J>YxI(YW{c%DMtyS!+SpB_zK~d!cDoj-S%!ie+q=;{H1_dnZ9cnu%w!mdtH_^dz0SGa=0@EQ$%**v1m@gnSzB2WgG?Ss8B8=4G0AGjo%s}S zj7(won?KjuX?%hZayv?e>(R{LHBfOq<&{mgyzp$ry2tWk(J$ly8WY2+h3<^wUC}km zCNeRkHlo+y&7Nr4a%_M@fXz#cu{2JbS`GS6+)%NumVSJKHJ;W67n>$D8)*97CNTR^ z#hPq2tHqqD7GAKyx@v_R{4=oXKuH^Dipqi5LkWN%YZBkAPEgtmP+r5ScFJ z&Mw^C1BSqmHqf1kOF;T~sB~==e9Gl(28on~fpcoZmY)V(ZLXSjzn>mH=*Xp%Zu#{d zE4KYKfcWeR&G<8|5kp$jkNV)jA5szXw<%Ms_S48Z0T+!w)%(-!8fB0p+(+6!)AX}U z&{3Kt3&nWK_>9HdQSQ?0l3i$quH0&KasL+DVf^d~dlD8=&eOUzpQKEcDdY2qm7+J+ zf07+uW|a?71nQ$luO7b`DrcWP{u=-abTpgdQUCj92LE%5sGk4#w}|t9;!&EVeAoHq z#N9kLHK&c701u50B@e+*ol0S&0ZJ05wC*J5v*AQ+Vu~}LZJ&{m?yM!|*vx8E;{w7C zu!?{U|A=N#jufoJP?nAFCspgpALO-{Ibp>Vntp3%;c_mVS9qEK{oeS;Tb6T@z0o+F z&h4|5p&T~2obYRvdybPqepG>mnhws&vekGHkc}Ht@MsMaqhU$n(-nZ z%Q`8!et2f`?c87~AI>^U%Rm?zYGyCxZg`xUWt}`!V6DpR;bbWZR?7VYSHZSGI7f4cs0U>=IVvr>yR9hnQ8D_?MQoxjS+7ts)YPCe)9|Px3)1AogaJMMooZzhYk(lBR|U*2hOV)i zjip5}87g8E<6d;9>Lz!>@}U8N$_mXI!Mqv!YvWZEE;>f_&>z++W|i;DT5*>~+i<$+ z9iN=IYJ5Q-@nbhMSn+eH6byDRyi;wW9SNX_!&_)r563r(ZPa$B6Dbp6rOeOO(B#2d z+TS!%yzCO*$Y15{oYpbOEtE-qzvU+Tz=-FmBAi|E7L;ChqUuw*Riug+{HWTnwxxtQ?iVl`l{{)zg6ArRG7oOb1}5#3pNu26*oCo8NaN2m9md^<;~<0 zZr!ZO0j*aYrn$w{VS~S1r3^e5Y}?Kvnixzkw7q;E-J*P$-DJUAs`#r=)Kdi$pEBI+0uvQIsJK&Q&gsHj0?Y9yL&$GX_E@XpqJRVXG~q|v 
zs9naTR652P-^uGis~JL%Quage5-03$#xIxi!+XGXYyx`G9Bi;nZqeQbI$*n0fzl zzDO}owz4eDKIoz+YDLRAeOCx}$~v^1zLW(V_4}j$ux1-N@x zgV~8`zT7>WrIRg6>Z8OLu}*1ZN8^;qu=*!hOq^qyPr^b9DdZPupRh{Rg2km&)fR|a zoX~3>RB0#4S{)4z9rQeUtF6#SLuc$Wp^VdfL!sRq_s2om3*8gF{7|PE0vf(dd$vIb zY;85*4*SXXy9C@|z>a3V(*@Z)o)%ujCY8Yy5&0&;A8qkiqnQi~xsWvOE{531%%+2* zUxXmON89ik=K7xTG9}7iI^f2?>Fv)h9iW*2c_~c%x*ltS(@d8c2)3<;bh59A{RV>2a2v&Ux94m)6W{{dSgKT7wkU zHRnLhgln0cpgPWBj#y+`kCrQp1*yvm5a>db7yPGQ}fz zHSygVWk;;*5%OEmJ%97fdG+B8;zd0R>Y*2Z#cflKAOj{oKhVf;tTc;yxwJ{{4XIkaoK>`+mAD7 zLI%FbU~qHA_??A{1wFQNA2@ZEVa}i2&a&^;gdbDH>+LQV?hb19CkJSMP0X99^T`0M zEXx-<|NMmB_&U03neJ46`vnw5%kU_;1U6bR@6_g+KNfc_dq^gAU z{#-Uh`!dmH7NfKY!U%osrSXp)IS29k2nBu!9L7REYXfDehqS&gDw%A}_+mbCG)6Lj z52Z8s6!IkKAZ~*PN&yWD-HwLven)7xqoFXQj1TAvcBE2;!3D`pCTp7I<>y@1jJu2W zbe@&-i4F3g76B|a!)|V)4#gR*E1XH#w22l9&g6|`Id2w^5k>TR!SQM%xm)#7hE*iW zn4&hqd)1*L8W&Xn*+>*@EEl`C^Nd!kWPKV%g4~$sD$ie}NuYa5sAytnG$?irfp%~) zV3>=G;!$auaTi`HKAQ+IP=@oMkEEQ9W_Ww86?=4QvyUO#0R z>@3Ag|E!k@rW7XIn`Q=Rog;4WJF8ri=jE5P%FJ?An>+|+DGe3cW!LP}{PGfop?l#6 zx?5aDTm8UFuO=(a#1e;QgkrfN~Kjyjfl2@bt{f@ zhT?^^5P&ir+)9~1;}y?y^Fx$r2_>|umwi=h1Y4=i^c++ETmubu6fsdX@3s7NA~Y)n zRUr}hzno<_&K9VyZ<^N2ycuj_6ig*+PHGgxB1lZzS!7a16b|=;53A^cE=}xHH9uV& z%Y*zYT1{TAGy6rOQ9Nq;fh0HxOfMV~jqJ-BC4-x=ps-Igy>qwahXg_t>RN74##4k} zQ?e5{J~;-ka22ieHmYKo(MYyJnWFO)sf;l+(#y+rqw@1z+}=}fq|1$Vm_BLOkG)?>DxABEdKsIb9&R58ym{rn8r*Q06e!Hb63aN2pk7A=#_)5YPv zO>&~*d3sIGgw_17KJy`5@<^3FR4iA+)!x9&FIW8hLRgJaQv`N1o~roSPb!06u#%Z` zpo^ZRY`a@j;i)uT;zty&egOJqfnq+*XGJleeOa~KmAAOSaNej}djs~KJGJ{=#oAs{WUQ6 zw3>C4DqpP&+Q!bF#v9QFc+z6$NIw9dpt%MF3gMMyzxis`M;%>~_lv67Z0Ozrm4 zqbv$#S}UcTsB%IC=*C77C=xZ!QZ8PbU|neVMLJ+hrCRP~o?TJ(i`}cXbQ~sb>#_Wv zW-*D*stj( zw^{9?Q>Ko7!}Ovv6gr~loI4RmJgkUCws}Vn%IndA8zplB(^$q)W_Z)PM3+GcdY#R> zLy@h}G*9q`i_SxeJi}zP1kp=aUzy>)s!F*AG62TC)MdV6_?=Z%fx!&@%c|xsHC*KA zp7HeEKuscp=!Hqn~b!9{-5WMXx-89@QO$Vj}{P1}$oQbNqL)8@>IF;x5LiuN2?iU3RqJ*Xe zv>tEGY(gIjQBktfs&cOGq4DK>a2dZiJrVt!{M+YtLRGeH+fe;d;%lVY7#Jy0lN3(+F(rA*4X5ipK`*~F6 z+BRr^hBQ{hXb@e_Cl_-Y4Q@K8Xu?) 
z=1_J}esoouW*x+J%tbf=)JcFqpYX6AfvVbC#Y-pxb}XXRC1(PBD6aw+8_7dmZ$uku z8S~w0ZM2|%pw)V4H(FLg^ZC&MTDc0q7_x?o_>40A)Jk6@0yNj-QfDY-v_gzepr>F< ziZ_T?VFYMKl}spF*~^yl3P3P_NU@zI+>0^h_S4GAS(+x0ufoGqbknV%DJc=I6)8H9 zwWCxnF<^!r8q2g_K=CkM2<-8i!Q_8b%dM&#=}Q32n<_Rfzw(N$R1?6HZDbB<2x^0w zhM!$*luO>cY*)a-{#v=-*GC1by~G>gQr^zuv+!h7EZVTE5$>{`1Ly$hjes(1Z=#Qhy~qW+E_oDfRR^Jb zksSPIY>T#=cvNwmOSto_ZJPRcX|ZiDQYx3#z>1L0b4~k7og`%_Nw!fb(&4N^Q(gt2 z*+y+g;8$DbW|^Osehj7cEwkKnBizUa!$nG?vybjatc$h~@S;euz9IampEjukWagI| z1fg)K57Nps?og2K?hZjfO1g#`xBr%(qmX6Kwc>CMC1R9m7KpLe-xmTIonr3>h>d zPV=|P7REwd=lB9Qdj8KtW}QmjgrF$%8dNEKHi{xaeY-Crnrexz!Ne5I%Muz z#vv#_eipe-b*hG^B>FV0!CyB{#v?LXn*~IeY8%F0zij$ITYU6Z9!YF!6j|RFp8<}) z(z?pwhvNiB(lPs4b{%I^Z2FPJgouQ^h~6cWfnFjnWzL2z=`zK%>f*3RcpSoH5&_&r z)ydIFS$8un&rL)lstB?wUvUjK>vB#$+RdtOUE|>(X{}L1AFADo=57pVZjr^-jxhJM z8(F7=Q^!0M9cgH^*A0x!{&MbfvczT!nXRg?dX{t#0Zy^Hw~Li7t9yv{Xd5#QmvKBU zVw`pQK1Ld>mQn*g%QRteAAdFarH5o1@MD#Nn#cO0Vjs!j=E{I}=g@-G(NbG@C+Y*t z)oLW?ayHC z*@0DGM3`8W-j3QF*Us#$e940~oZ;2i$MGPpJyL)}ZTq_;#+58wSQvuVlXk4{TavvAUI<>|v<2y8 zZ!Wu{F#Ejl`r0-&qn)N{hz?&Q4M%rc=c95qVG_m^_!h%QyJ2LE`gN(>Is-=7nZ=+w~Ui3(Z}A z_iQz_6ger6r`iqqH7PY`q0gZ6F4;6Au6ow4FBW_8_FImwHq}_Cp}isu^&xZ=vx4x^5M15XG!9h*m z82A7bG;PjAB$?yTMO>ks4o>`*YFhpI=sjJ0y+etzcsCS7ys)ZMZZ;?DbOEw=itGSr z;sZ`i{%O|5pyN?%wgp|Vfv>2e^!P;~A#rV+I43}@tl@E5QK}!_vjIB=(3}#Eyz-T# zdOY{z^d5L%P+nyJ^7%|F#q8>;)7M;^ z>;ko5n!Lk2lzEn;Nl7~N`Y{#u}O8oR_+;%&zIz$t?7(Z*j zv*68-uAgA4Jo&ymh=&X?$eSTHiI9Hs6!mQTzGH^O2QFY+(HkKu$oZgdE`*3L5+H+> zy_JW0da!rAZw)T5MaaiOxAT1g?K@j(TW1kYQarOIohCH&br7XnbTE!NP5GfTcE-fI z8`@VA9(zWBnDgIb>W-^Iy-PQg)cwmK07zZwSYpRqL7KF1g2508l>a$VB+ zLUW2RYZ&zD=#%-u`V>mSo&~+&b9%B0^Fy)aIoLr~-scyhZsFlr6*-1XU<7zpKxCR* zQAraGA^-G+gZ?$R=yY*-O9LtZ;W9?vV?xVkch6P5cWm$IOlm^Fe{AK#_Psxk2|p1F zSFp-68!^J9VG~>h#ayS*4|zQrrbs}?SMQ2xM_h)Oy{&@^f%{z#vLt<{yv!dUs3%MZ zzEb08fR9uQbsO9K*1x~XfLZlW&r{I|i<%;+?gcNm4jbh_2ucvfSI{AKwl89s-OF)H zH`I2&1p^*vWClv5^Y~yP$GHM{*=788JYun(*KqnFBCq4h&{`DHD#AtmTSpJoLPefz z>%TsL@{c^Z|AL{nd0{tOTEIy!8jGt75AKsaTLYCqdw#857Ax9&;vt}8^KQnl*IVlu 
zc7O7m*O~xUN}GatfaGCxl<|yoBpyh$8w3iE>~SZ)zZur)@`fu8;14x4zfg%ggIugPEvq#ECIv z7gHdlkU|kdIRNc!nJsKgER2DUKi+|EjBHFSOl-_7%=-FHHwCkQwl&^ z;v4pPBh%+#5LTS=PQR;WfZ>5$0W4vhK8uLxwv#y7Z(D4_ARl|eLE$isg<@1?7@D8X!eY17@FpA3Fj+TyY_FU+&=6P!jH%) zGtFYid5Y_V!XebhXTl~GZ-_C=ASwz<5!WmE1$dadS2b0Yrdy@S=M7rA&JTL-&z#IR zpO{8no03djYrP))RI2%^E{S8jSk`Bf8GpeiOS7HhP||M8pfo=&?ggLuUIMO$`4Vd6%KDvvQgE5E||D$_IY2tXfl z+%pjKwSz7+g4Y5$S&w#*xc+EoLO5VDQ<|&Xt|D0P&Ca=J7@Bcd|1*~-wM15d^foKZ z8ga$1aYe%^8Dw2$Ps>Pynlr#k`!p=o+xJlf@5{BNgJlF|3{3X!zekn=q51^1b-R8E zLLBdO3onmd-q7*P{Br3`Bo_Zl!hS;BmS9C!7C2S@(Nf=O!Cs5hk-Y1i8mCuK_F-XZ zVM>Oa+3^ku{%jJ-C*ki?e%k5`#Il|YpdBY4p!(bC_A7;^$Anj?V!nJ{Ccs-SnZJdc(fp?@^5 zJkFU{-XAfr9H!4J0i!>Cg}X=Z`Dr@AJpPWxZTSl$UxM0UE3vk zLE}K|$Z?;&w0j`Bq~w)IFZNN`*9EH6chc?|oM*{8W7Rsx&zZ>9P)ZXX8dAs zG<(DV2Yt`wO7oEqkR_D1y!oZxaayEbB;Yj)2a7!*BZ$m+7Wn=3*9Oq+>5_t(yb9h+ zJhaLWcHPiPT;8Ql(C3U6O)rp&WYX>>)r9CwJCQ%X7#q?*yA&nAz!aLc`LvffqVN*t zXr_S2$KNuLFf6g>RXZ`6e03zK?)S_Y%Qg>cJ zCu_wyf~*~r8_t|1$R_9dgGlO49-}{P%|jvttD8N4A4q3qJ1mu8+um4T~_x z!jdZRP*5uvP*9XNhsFO6k}NmHmkG@|+Zk1W?I@S5@Ox?cMZf!@h=>7%)Anh3al_`= z(k%G-fNGdAo$l7FD;ckJzFk-`|CfGl23E_4Vn*VKFVVAE6|k|emeWsIuVD4ujX(i? 
zf^X6Xd!rLoWx7-xh%Q_Q1+UT<1UKeMF9)8R!_0`mmP7_iOG#l#XJimBxIdYE9ALZG z3_`}$T6SN`4QdSJRAzCn?>unk46Ye__!j5MfTGTGebpHA#e%M)eE&HUp)a}v-=YUT z))iYZW#$fP_F%iIfj=)8Cq>1|n(14H_DC_*gaVMK=aFl7z=y(QxMtq?{0@#Q@6YP= zR5RV9CJUSlpY~W&YCFs(12r%dI5g~MgI16-)~Cjn zjPc^g2>3Jxm7m8ncJK6)%#rO&ymGNqw3FnVpu;;eq6kbcpwF?NwlZ>#6+mvcNa-7K zip==Jfo%Ask}!A^MxOP7N4Csb*r7RLJthzk$%A8;;8uOnk+DKqR6y~xgR{E^j@gVC zD@A8nU4KPGf2sa6^ON)oW9sG%YoB9lPqUa+5uM@#XILHex9fp)AFI9Q>>7L*pj7lJ zlSg{3aAM;uBbbu3ykqWD)U>}6K58?{$d(5;ld@F=_itYqTsdBK2g7P%4j1o?d{$Cy zRVIy`iQ|Lrjq@>yj1wqF!Lyd0wqC$-~;`o8& zu1f0J{lWnWmaOvNCltMOek*VE?`J3VkZmcqMtZml$Gq&ALuIa<-fiHE3~87JwQ@x_ zj3yoV^-r|Y@kBRFg1jBb9a}1}P)uwNQz{l(+#Mzz;fYcsJLun0Mt`TpT+ZIRG<_OJ zsxva&5vm8&@6rZzCU@)Ur|LW6*=kHLd+iwe^#N#832EzM)6c8>tg9FU>E87fpllO4 zPoA{wtMcaNf$AqF49=IKK3qr)eO0V;rMXUB69iFch;|1~eo&wfzfZGqZ9U-%ewx0Y zLLLjBnpf1s2y-ar4jJc9r;ef!oP{7|^to&0o+MUVxWJVe?48O)aB2)Y#7rB66epz}_oB8I&tT%oE ziY^J4AT5p(#uahnrNG%osy2kul*HmUJ*(yJ*-U@3tIpWTcRnW{>r(<0 zHdUQ=Vyc$!8V`A_T~xh>(b^Jnj#JU?6|&1vDlN&s=U}uvAA*J_nqNF(`5}gT2R{hr z@oVI073TndV!ghz5U!eh~i%m9KDb3Y!@ackFUYw9K2FdKbwCzb0`6@}Lw>gbrjLjVdOZHa7 z#OqOz1&kCrjP`Z3K`CF!nI+285FRS;SdIGy&r}YBa68?U1mEekX|LWN*OGMtsB&_MZIU1BMs!zUbP=yhgJU)6y2;o0X@ez%h3sqklKlts) z#l|7CnX6Qu4Y^CiR|Q25c0!CxloCwY+f(M!ZyeB0BrSx^ssg%QOkE;nA z_oV5yiYppUrY!ZF>!)6D9BRqr-I#SexiS(|7mJ({B|Mapl5+6ONXBn-9>ufJnsZWY;i7q+`@28@$MN!tjI;e1(* zAUJ{*-HKF5;;6B(CDiJ^eRk;&PTMl4mr42pTDF!lhyC;{aiPXH*Ml&j={zG3x>urA zu(NSZs?+Z^tj3AJBI0fWEU|9Ebj_Z~&W=DWj^5#eMW{$j{7+;23ZG-Xl@Ux1e?HFnl9 zUqB+iGlzq-j-{r48Y4;4gMf$4scxVl$HcD)FKfoHd~+#l7OQPf%KT+M5eL=yD}>VW z{nax!*C5Fd^O1yVv8lrQHI~q#3kl$QqcfRQ@V=3K4A49=e_DaM(!O~nz04}Xn9kXy z@qIjK$2^7w{A!t>PU5ixzX$Sh7V;_qi6&c$Upb>kH7bzhO{Tt^nx1KS86GEo=sDuUv!ni=eC)CpOldO9A4Am%d{0h;)QOWVq(<$<|t z_M8CTwP)*k(^X>;j9vAepC%U=fn-aOg&0daK4WH4L^*EeP539ABf7`urP<^Ag37_2 zQlj5Bc6}t2vtsEgD(aX^bPCxfR1-e#jb8OC&~YCahM`k4*=;{+P?Q?a@yhjr-YDVK zEeR}n6cOXrl`^}UkK0zNHtwrn(QlIGW*Rib1=_mo{+vk=pQ74+4%3vb_a@OlA%6ZL zh3zpR>qtpw7P}4wZww=~F;PRbDjn)5ZS)*5fD-QDV>WW{n2*o_NMeFQ_2}rFA(htY 
zaH*2?gVEgE>^E#E?VnIrPZhs>J{j@Rz1rJ3K%}SEOmHr>evd*wX{U)-r76wLHp!;_ za3%vwvIxu8v+q)3Tf=h4Qqz?ad~yDrF;DqjcW@$zM2K9>rllBwdFanX*)v^NE4YoL?i9mBJ3WSe3q3Sa0j@r z|DHG%4I`alw#o$C7ilr@@FRz1D*OkvK>or~fYzrN;bjsxqw%8ONHwW$1+6M8-fR0S6wk1;E zW2`|(_K}Y;K{asw#qdkOSn<;O@pZe!g3m%53?P?FEe%&Wc@PMbhE7}fY1-4$(kha9 zE>hu^g2^_$DDU5tzXV|V&Hzz-5TpS9NBRFhijHcNgB8ViKO}~s51y9`Bz}m2OvL#T zqZ&yxgzLkqi;?=tRb>;+eIuX)9!q;iQe_|eD)f#QJejB&(f*+P3ZU~#YO-xB?^!&* zwh-Z>0oXn*V&!sIA!RKumP(FP$pZ5p8FNN>jcC`h=9dyu&?}dgnZ5B;ZpP;N)=ry^ zw{eynQgC%CiEGs*9XvElG6Wt5Y!C3HpT*^B)3LKi)mZLjYge(q@TP9hD(Gr7kqmp7 zy3dBR!vo0ABz}taU)je>DHM5s2rugZhu+?1l^l z4R-`F6dVBZn;$8)LFKe0O{KIL{SHX>%sjuh>%4^I;nxygW>)Tr8tR&Q>iNnlhMGbi zc41~AcF90qaaS#EQ$0CT2g?I=W^$?;v5zuCB14;I^7gVuvc=kVg;iA`J3E?a4OJ}t z*yPmWzQP!)r`zMh87WEr)QinUP1J4FtTe4WRBveWPlp4RM@_P_yDakUvX86Z^{0(e zW+@2!TnlBfp25Qc#z{;M5g=JPXc$shBuITBErqoDpM!s2;GyvUJ;H$675#DePl8cR z6&?!qFAe@(6i6#9_=kWW1?gW=5amGS`3e7zsDCcM?;@}Lx2mE3Qnl2d`ERhJ50O6w?0y zeyBL~W`MpvB$BdKVI1~USsTK@hGc|) zZHENpn;RKycV(2AC=6&or16sf&NzWwecZ@kzbm7*RH{%GVhgQ5>`?&eVmC54?#d{; zO5;0($OrwQD{16g7@T)y^io|st^2`%*yLAx#6j})H@1W8t_)YY(}s44=0BnT&d5Q% zg~5GS#`3DB#6ATS6rb|%j3(?`7(91nq`K4`{9RWtT(>ZI@5*2-FKU8?*n;H`dtgGk z|Bd!Ayek7cTz3xUuM8`mTNp-nWlVte1gar^8v(KKuYUEO_ZEinT^YwnTqLy+-{t=! 
zUX2Rf!Z5ijqbW<2`0w^ugq+)MvZB zw=m4^%5dIawU>ePaT(0tZN4FX3&Z@b42p&oJO+qgDf}~=_+!BF-;X@M;HS4REbhwK znl)Z8z6S;6hV^?pkTq{%Sl*T495(9y!*~CTyQuoNFhF-@(E3YhnutI_#ajQ~4kCkF z7*=;>44V>R|Gj^F%({hPeOCsF$^aJVbu*VS7;_wywH z^v!V>vX<*^p6?)M+H1(~p`iW?W2pWi literal 0 HcmV?d00001 diff --git a/core/src/test/resources/indices/bwc/repo-2.1.1.zip b/core/src/test/resources/indices/bwc/repo-2.1.1.zip new file mode 100644 index 0000000000000000000000000000000000000000..3253da62c3fdc29d811071ce11c696aba20745c4 GIT binary patch literal 70133 zcmbTc18^_R+U^}&D|WJC+csA4k8RtwlNH;xZQHhObH&N`Jo~&~y=TAmeS7b7rmAPA zx~8wX`ktw~r+?jYQlMZkKz|<;@p0n+xcRRWI1nz7gRz}0y|NNC5Gd}ogUa8<*$oy5 z80-lc2nY=IuLtG+skx;_L@4H8wSO0w|F^lhjghh2ztZp|afu!6*P6ajuA zER>{_N0La&JmB<(sC&>*SKF^XCO4c7Fs>z6CLJA)$C)>_>m&Tr*V~?d?w-b8 z#|92gZuqsbvwJ>o_xB&~5BJ|%T6KK8b-O;^kLNl{atJbc+u9%A+4w%sIW$e`bKEUH_)((oaU=H`zSvY_Pl=md12rW2)_@T>O$nAS#uUnZy%ZKC} z(vNwMxBP<+?Ud3JJ5>jL< zX}`laFWL_#3f#7bovt2NmUdyc4lBN;6}}@>m<{~GLz|x!Y^jPArbb#oIlEssn}aiv zYb7EOsEi$=Ek@*5AKtRwIL?$oRI4H(msg10z4x^>@I@C>f6P+${G|iU(KDIV(1Q@o zbtDaJJ|UN3Z7Q~*6RSdzIXA&lj|(bl$@U11?Y|>k{2NA0PZ{U8P${{) zPZ6a>m-5Yoa|qdZtPqlB0;<`;wgxzC#(S4KxhN5lZR$?1YGeD>@fgtPzhHs=sq=c$ zn)?E=E{?xs-J7jzv&@*-9}iAwHTU86AP@d9J>1bcjS8S-=fBC3#+f;%5^dXVLz4U3 z^5AM#8xGehS^XTuFAt#&1^u`V~MiB5tRY_ z6sEBGiRCJ=-_S#2D4DtX-svtD^I?n=BzU^)4xI3o=JOi^=_N70V7ar{>~f=CBye_n z><&hKa<>5PS3V`e$E#3wqEI8Vu8lfPwEa!8D5i9@3-|;S+w8p0cHK}>djgyX;?bO$ zyhA*4CN>bx`#UANxO{!zGlusB;g5FK3O*aZjep2eLZ4|bqBt1M+%l66dQ4txy zZ)1fTkqG`D}uk1_kU-z`*TOizXun!zskSOW{m$Tn>F-g z6Vsh5re~(7SXf-9=q74jW@yBx#;e7T0)Y`s%f@`B7$je?sB(|!x8HI@s6bGZ?)lYG zeSrdh14oU~>k%~!(RBZ>aqGYB;Ulc0zWrsb@vnsYZ|yPtTYHE9Wp6h*Ej?jBEhSlZ zvbVSQW$*HF)tAzwP1do=5w{q&$8gsmhJKo!_LnU9IMYWc2*mPB{CI$p7?~J)DpG`% zfW?p1O23c&_{q*kgO-o{%|YZY7KS0<23dsSNm71J8paY9v`{XsVYHOuB4ZVXeKyEq zKhka%=HTDIe_Wf95ot?|nnb_UFXr!0e(;#+x%I)5FYAJBO{FE|q#(dORl&(%ok=V*n4jzyl z6(cy!VID1MS8ZR|5!nk?bn`Nw5*k<&_e#Sr9>cPu(>J?&Bw8s+6a zc+;;XNa`>&>!#+>@?daqDQC+c;MSDkJt=%I{4d^Ld|3~?Cte&M9rOpQhf5<;1RiAy 
zNcds%8s7S*>5Fu2y7pe8?;xk8_Fnfw&%d91!Hp42 zw5Jy%lVlw|XOvVIwn%Boe-g21stPDA90jb%>ecXo08c6EaQqp>qG=l3 zUR#^(sM}a{@sIWv-w+U}4vtt_ZJ8WM3&w3bVoFwuZCgviB^i_;t^I+?!Y?2Bw)NqF z$b_J1A)8Xw*=!>_{b*4IvrHnQd~@iGz9qE_cca}WCbw>S$quT2=7OfktasFy2agqo zu)ADlM$9AF^{mdOf<3qz=;>v@0&+SU_@nfKx74vP!Xo)xirF?4`WQJC$HB)^*jfc* zoyVNLItV+FphgPI&w^x0vVB&Cb;Y_w)nStf&{B1rlgUXogOSjsTus-!%^akK7TXX4XN&{-`4FGg~> z3q4jajcI>PT?5^bvAX1na|rOq45tGFCx*g6X>ywyL#ZSXPbYP=859$USg@~s--1u` z${_Aw3!V7~Ew_cTrH1iGrdr+AcN+hjaXBWj~m`@ko#MJxkm>G__N=YonSp z$>muPVO00Hx!JV3%SoDne3XcBuy)7-fwNQ2OzH?`{_4EIP{@#hlh?U&THc}V)2Fl; z3fC=S7&RgPh>$8-EAeOh; zL7Htm6&}wZH*}Rbr#uuF9Wj}FJ8+PhPRX!L#Q5gMDYGkOmcmSFPzzO~qivvV4wQjP zZ;AGVAYOCdusz|KIp=rCjQVc*q6Sejmirx)dPf<+gIrhz0u>!wLXtQ4ZE zz#Wd^!7VCUO*>^M#^yvSusBPNL|5W^I~?1rW4||gU_}jKx6d}VlUg16p{*63RbP8Cusq>7z0+B%~_!BCns`g+z?F2WM}c*KNgx1PXca8G9d4c1u)3 z$F?aO%3G&w6f}rJ+06|o`(9R}y2jWHCu6tYGd3;iPKscY2Ueg_P#6Pi}gF5zyGF~JixLtkH@Mydlk$I_qKT(bJrML0Mx}+Gc zvG{NGI=1iHyLypQ@>0P1KdGljUDWEYU)V;eQg>lQh{>|nmn;PqCKAkR3bSsvIeIVl zaN1>gDuM^7?nc^&8Ae=*MKlX)8kH#;uh#566b>o-lt;ComQE#klW(RBWljWp5H3ZR zktyx>M)0T0z}0B(%L!f=B#GgWstQo~BGo~0SH@C~ijqTeD4hb)y|Xeha@0SU9K*Sq zx7tH7Xl(oSvR)w_zP+rUwHwzSGkbSkJPbZq;3qs%aZIOCVaJ5DEpH0jdN|57Pi9pt z9Yacqj~hrSoU_#D5)Ww*N17_>)zCj@@s)oP!K$p6N=2bx4BDj8n2W`e@K7wqK|)~3 zQSh%DB_u6V8LJ0bWLGq<$ObS6>RlF{09w7Ei}dQ$%7!Hwz5Yyf$s1{7uxLt#S5YZg z2FT`4m*x6+p!|VYW07*vU%AX)yR&9F5)=OkRmrCcEW1 zFFb#C(qz-~6lAvzOWV=07<o0@Mn6*eDT<72Z*o6XKp1Vgp z!5D)-1c?yofo-5XgLLrFi$W|?!wie(S(o!SM%Nu|7+SyEOdaMj;K7Y;&N13~R%h!W zbA9T7uwSct_)xHNUTy~}(kOAwD>sY9dv$)cKG-+SRC+L)w2u72WAsiBTI-bg2Ep+w z0Mr?!*r%5(0}oa&&nV>)0=D#NesDMu4QHcHBsBGoCpZ{n87)kww+S_HZIwBBxnp;< zspu_@0}9PHBMtX{U~9F9s1`D1sLV;(j7_pRvm)}UDStImlbDQPbkJXe{&w!&TsB7c;NJNZEqlcE8b28;nn<51j)eueIdBdi*T=yAeCG)q7 zt#>Ml9srmIlC9m05gaaYVN-Im(l$jckuqQZYETz>%cRF6=U^^ngVScK*`=gLPwTr2hpx)@1W^ibvYr~n)TYUt$Df)vF2ZAX4`}cPE>Fgi1)pfza1 zw9yQLWH0!LFCzA?LYP;nZd@Q&**s8s{8)Hut({iACXo(;gwrAfsSYqcGepe5fhN-` zg#(CkXHO`A7NIa*^QI0Q<5Xjdi<(U%%K})@(AmHlXZNJYXfqo_GC;NBil;qOaoguh 
z*Q2#i1>;QdWT|;VCfoO@mxI$t&K(DmjZrcY5z8%207zHC)9enObga(&S76H##GszF zXi>+~IxI=c@-z$QcZ$*ponp}^F1%^h+UwgHcsg?UDy$rd?(pF2z}+_e?0I2pM;NXt zSXKhpxtEw{4V63Up3!)X>!rFa=UO5Z z8=#2HIE^g} zY_liIEO&?>maW?>8U%I&f7O)_O{mJvWvgO>mSe8d!*zc>> zk$+I{)Wu3k{0euy!M+z}d>hvI zgdf(7F$Cv;K`JO?<$4oR6^=g12;epJqqP)wsNXz-$jZabSd%yyEOid@NMej#Gi{L3 zWrY>uLyfLrf%qVFwq{e)W%R=rC#i~V>mZ$zbF&X)=lIG;dwMAkQYY^)v<*s5o3GaN zXSE)V!Gp(HHm#_Y5F4Sm6kYDt`)WVaI4-~{_+?f|Bzrb1f-2bfvyF(28>WDmz$v*--q{JP72JPu^=-tMiw`nchQ_1%kGKYnK`- zD23*xKN>+}M<=gz+RFZbVhV6}gpTs!u0j6MxjR(V6LcJSNw^gwpar5q<`?@E7?;w= z-sF$))@&XZS~&RZ3@&7>zK+&51l5Zd3qmeBoap``fiPUxi`xaTJxtdIc{MT8aHC<--Vj!g{FcLMZ5L4Ur?-mLJ2vr2`DH6g9{?8K2uN`!+WzO-zaBQe_cJFl3) zHN-9TIitvD)SnwK@2KS6GFU|wvaGPWohyV+V|jlBxLCItq=U`cR!}GY!Kc|#%gxpf zF%p`}hxQaRN2f73VPEs|N3=gDhZhUn7F|M-niBcxtT~I_;h=jit&0n z=y^N#_5CUWusGDwe~hpAUT?e)uy?z>z-}S>o`-h7eWi5cygZhy z-E8^XZ_Vt`HgDN_bl)!9eqP)L>b`0SS$c_o9o&4bw3fer-fVRTdp)sxecz_h_iTO4 z)_Cuh1g#wH5bu1_EWSTA+J4AiPLL${o;c5Xc{qdnTyDMWea?J+p6I*=Ph0jV${c>= z-F$zHBsjgH`1-tG>3U5qKMA+fe}nz`+{yB}lp;8@_=`H;$83K!oaTJ3&iKvU#dMxw zgMWqCed%9hEuM709Qe{!PWrwR*rIy#fBtD*TsP~v+a~aG__X-i`M$2Y>3q04v;5}B z`dEPcI=tBNl8>d2&v{!$#25H}O};+*2LEU1bTpw|k?CI z&Ho{Q_nR_6rlb5dNj*IVhyg&JQ~aVCo18IZV`4cDjz~#1DGm!JQ$fW}QB8(aR^jR} z?RYIhKgMz6!kf{9~mT&!te6M|I2+}WLvf_@IQYy>iVbG zLe4-yY5D&FyJG)W?!7-DGtrh_M`C8u_)a$4OsOZbwmO96*M_!^WOD=s(qE!}qgt|T zRWlmYl!j@`%hOO25jLk97yzX6hjoDawgX5F1b@^PfAY_0-M@Q(lJ5BG)RaD7E;?pE z@Z6MSGIR6!x~a~vM{j)h5+g`scMxztpMoN$+MoJ|64*^f65u*sJ`&)*gNn+p4wqEF zP4gp4__8>U{RsthmwyTye|Z&CJt&Jd`6T1NUt8ZyYB8B_cUW$_r~6d3*{_fu#CY3J z^#N!l%k0U%o`#NbO!Dp`SRrpTzAm!Tq(%t7{;JB+iO2sMO2<*9!}Bd~IMTy)=g6vc zn(usGUzyS1JFBE;>tVJ$bJKdrN}4E{uPJG9YA`Q}?Q4y6i!N%0Jk2si0d`iwd}hmsrbF>oNp zUG+?IN7>@6I$Rp-0`?&-Jgfc=YvW9NXn^(e#X|H9wjnLtu}NWON6}&rc?ht~FvOX` z$()3*wIem7FDxj>z)w(y4Jr@n8eU(Kl!x>^w4+)X059_5D0OeR^igmgdPC{f?3ni1 z&4d)(fLeoDg%CW1986He)sUEd3R=Ydo=i5vylKwEyj8wTaCnfmsbcKy-dbf5W6C@B z9-b0q>SGrfz5;#gb#tI>$H)Q~%wesyom!O2%P~Aa&z}*TLDqo!4q8P1T{K*y^5JEw 
zeRp?k0`SL|j(1#>3ouPM?e)-F>U83L!o8Ttk5s=%^5>99OWCP>JryiM?<(7H`?f14 z+E(`B0dnlFl9yspxSHNsJYshHxRpq1!VXPMO8ey+DK%5bUSqbNqr|3YZ#)08Hi;Nq zd`nRFr}`J*gLw#cb*hMn4`5Lq1h~a3;W0FT%v|g zSMi>=I5(&ugXoD{#Up+a=MB~Jwgzvj4vL}^|DfhlxM?b@B9_Fd{X$(xra5K(4ufZc zr&9FBl_?XyJZ{*FN-@r)qG$PAMsA$*JiBV2tzMs7i6 zQziS>&S4C;kC3(!9A+e5(elDbWS!svHk@jbA)yKNGV3qi*5LsHC zKRgMhN~sTR5R@ZcsUBj%!eA@%GmN956~9N|`E&etR5c^BCQ8Q!K*uu$*Qfg!p|Kri z9wVOb7IQ3TAp39lOg-1o+H-UZ7_f3}12Bp( zAVPA4)Ut?`EApQp07|VkuYG24vPGzEM&#PL9ow_Y?-Y;$hdzo2#7g|0p+;XQHaSD* zeSKsEWp864ff|1F;E(s)z~UDO)w;H7#`-xR`&Ci}3MH-t1#2E%$0zF1*Bgy1Wpj zbX1L-oN$yi^J_h|o~2}PtKNoqtNWj?lxkEM+&6Vn$V|WBA7_{*Do1wa2LWhpI{xFY zTFe(i+9pRY=&!`8aj@%m4=3pummy!B0A-7{HR&l#$ENLl<1xnWLJ?!ligE)#-#ln1 z89hyyC&cLwaF!{07WJjn&60(3j2bEWw`ek9;W&0>^@yjZ=Du9}9F5lmYV+D`Q|STY zYsRjZns(fa+?^Z7G&Y=taa70kk3&Qxj{G24OnPX*A2b7IlG7PEDJ7VGGJ zgoVLtp=dUBS}r-o6v|7IrtIM8mpg`@)_Ii|X6@Jti__bTJK31stax3u!bn-Up`8i` zA`Eb@C)3kdtKoiRfdcjmC18u zg-S-FVbm|)DfN7%lX9yns)o&5(m>w)`10Lx4N0;C+$wi7((n^MvYAYpyhpe6HD1uBvfm?R$CYUefTl?tJ}Ae3dRe zPsQ%7V8~T~9+j=F$DDua<}AR+t2iRryO3icNrOA#cY#@9c}0>n_YsM(;J5h<6O%-)$T$y+cW|C`%5dqnC z%=#tDqDLd#@&U&+S=^eIXa1({K(Q~{eg7qOBE}c} zI8N2amdyTlAx{JaeOQ6IAF9N(99$$8!WMTR5P~C*3AZ+AbA;($aLvyquc&_00g6co zbg_mcd(mc8?>KtJ$~(%85FJF*1EN}=0bt!}k^HVQX8UzyV~TPANUWdoUwnjcA1j1D zgM}BAIv^ifP_xu_L{_Daz?GhcW%D`t8{QwvSQvXYDu{HKqy&QbCZNCRjHa2434`u` z(AVWLd_gtarWy3W$wVp^ppG2XAe}6+SQ5gusJ{NTux$s6yn_imY9-2QpenA~d#E%bD%xX#5M5}qXH z#G!q?=JNd=2al6dOkhB_UFJS?!BF1{&<%=i7%2~wH~dYc!(L91YYCU7#@@?GUS2Q8 z;+3&%r23eiqb-HZZN|0Y_oZW`vt0AWS*$~=VPq$9mrxw~b%!ED?>U4m0TU_2gvPEV zrf4fpl&*^Di%RxjG~eWQE?JrFQ|bBAL!d8r)RzuF{wnu_8W=U<*5XwcF0)DWnW5I_ z&JoASx%Akpu(gJX5%8qQMOM55Kv?3y&d)eSg5zl1M(^T>F2V7UP}Q+oeYzwf^C@}V z6LFe_=kobRI)+<;mzWw_urIkZI1cVB*yEItbkw0Py_K!#wFYQy-HOAr^L z1Z}5zhFh4}2J`EJeuJ}V&dPY)HZl#gl`9lqNrl`96@SVHlLd#RB!+h$rsaW9SJ^ zL67VA7BM+5)28#hUqHjJKAQT`Z2hw4)c`WDnkBCEZedIq=yXS*?!z-Zc)%}m+E4+j z(ur#I;bZPR2ku)tXkL7|cnagf`|Uh5k+OeEIVd)=a1kj#v`~yoJ3EJ$?)?+!MsO+6 
zkoe>@a>r1;A#wYbhR(+N?!jfXo-~{h(?F}!fHDm68bMXVTJ#2f<~Wn=K_A`2#un%M zN|vb;>_On?M`bz(EIei9Se~AlwCzS!AKlcja&Gw%V?cDOS(srELEKKD`EI0g1$kMm zp3mdYK?bg&LQkA_jBULH-h3(bDfF=X&BDMC_Ood}jTrc>jpojN{d;T3&_VpldHv)M z31A+9X2McWml6Wdgn8Ccex^3~`Q&*cwH|_guWLQ@w^ux&;9OweE;Wm9Sh*kQ~~x8E(JU z3Kfp_>(A}o3E2vEttGnU)O*BM$NM}GoOzt>g?@U2`XQC0zvck=_|z}cAzmOZryXor zIsHksgaP4Nmg`S;&fEmBPG1m}8+q712BdWMNdXfYM{Au6SGkUfZ{VJaIlHg#yGfd?2 z-q(F#u4PaC?uVky%AcpApThP`t1_pB_E6;p3 zWxSd*Z2lbQtbK6&#Nu7PO6SBptPzsht{xjHI@dA~SWA-G>O(CUEDa521<+;;#-5ov z?QLnn!sl%4E0fLal{+#9R##wTIMct^9gxSe@NY_$REsD&F(#C>FrNtO%NQZiQrzQJ z++!nFda1%5^0oG#MHeSW%1u~jDec=*>jc;%A`D%2$HwvKoQ2JB9WiAl>WrQi?kIuX zfCU;3M}1cFf$*j|pqu>&rCTg~TD*q@2LSW*vak+INFN4+yR`x?5om)QDQ%DuNJR3|wnsP`WCYtW$;L zFRVnThrd~%vwGUiqpkp=vJBJ#=J##~db|N1*-A%P9cYjL3umI2UI4lR5|N255HKI{ zJ+MR!H-LMqhwXXiTWa=B;!GH5Iz`~0BlV@_CXdw}SVZQWf)C}beksRRbuIMrx)VhjEL(%NP;~&o*tWn ztQ*|Kl!#bNf8gOjdDIuLaQJz_D72W%4PDEFrgI?Vfd^r6%O;g}&hQ|HJ4GxCtF zUWJ#_gNz?a%m}G5C4`#^hfnjKs@M=~WWc@zX_XmLG5XdXSZ!+CHg16UDphDvh zl{@P-wcBL6O8Ug9E4C)8RB*W|SgsT|rW zSn|$5gJ)f_oQZoPQ}$|u8V7RhvX3t4KhqnDpyU&MoxcrrAmHC@G`HC< zhOI_6v_Td=89sJ>lo=j$S_BsW7VySA-Edu4Egm&=9u>v2A+4LT80?u9L;ar4!u(H@;0pMhL z+6?^)ZcAi+nKofjr-Sb0L?t+~ytoR24ezJ-N}UOF;RzlCyc(~(={)$2>7N5+Gya!w zqR)(;hhJzOJdGrw=6uJf2yewUhALp8&SB9oPhgWJYdAc`$(@bJo4Cr>Ky!xWDOvh& zdsxK*(M@}Aq1;$W!mMqV-)%Ix8SJ&Pb>=1a*h+MAn7=Q;J4tXMY68nUSHz8QNNU0?*Ttj}M{Az;Jo)(Z^E!i_h z9lR{}@`vmjDEymn@+?&OwT0_|HDd8BO?&GusyQf-SjH`Q%ua@9>$(I-KSEaFD!39~ z*COm$o4*THJeD7OAm&3^6rWP*#3*TTP;^qN7|Z92`?PI8G}-Y7bqto=5iRR*&t80b zM82T1Q`4xhZ#XoKEloHA z%5Sd7F@6oV@QHOQU8i4@6y{L@v%?t7b=z?>f^AyMCMyTc*uE2f3L%)^?FI;gQI4&x zxQL$sBDb!kknk`9)uBB+pD$MNy^D1#rs4M%pxckiV&$u=rMy<)pTm=@82GYw`Gs`f z?FUDIUNObSKSNaQCDD{OMFo{iFc4g9o(KH>^nM>zA@Uq;BiNCF+PFSxKPi8`{9%YJ zd@qGL&o^V#HP$d(+J4DKu-A;xtqfIk1a>TkS2XDy_tnQ)q0p=6kT|i_JTMFmtWu%N z@_6x-!F-RWusT=2aekcAdb5S$zKEbTJfF?dQMDk3`_&Zfhiz0 z_8L0}9&~5)5jnsL1a^dBL)u4ds?{rs!b=%;!kk5D-T=_+g+Bn7ePyTSJKx5C7*GnL zzMj(8Yxf6TJrZ5SIKmtRW7Kz((BD-;(y6k4yq?nxxyv~x0}%^w7%H`8l}AQ^j_(}b 
zQ0s7sJqiDc7^Y|HC;@^x{(;E^{BybglhqV*83%p=idwOC&xc8hT^}czYHP2coJ1Jf z(i~o7Omenb#GAxuH(hmA0L*YZCn0!fgVNjn4x3v4SXeZ(6yXRyAXfEk)2nPBje%7i zS}z#5h%g(d=DdWAfz-we%!+ix9*`wn!;F}qpT36{NEHn9ks82nJaE&*l&&1H zYbR~6NM?7pFXtA$htA`T3otTl$dCL@jXVi6u$(hH(Z@*w5@@-T;8Pr7yI$1kTg{E4 zzO=w$qKcoBuRt{A;>t#4H%gy>i1GynzFplbKEW z(-lgkOza08wvkiAnp}6U+{h_!3+oXw9 zt5AxmW9*$0Xb4m2@`MNNTQK{Ya7mz!{?^=%#Eib*F0@rmvNgz`{g_01?{$`Rc$Q*< zcu21j6We*pKNnmFlk>w?a+TWQ5E*x-dwXtrTy+Kx?48@C6na8=Jvnm9oB-O__yFY} zK)E3{MEX=jv3gLbZ+JyqeI0VE!H_s%cZ8+)C;()7QpfHmkyr|!^ zMfUxYeWDC%4|(T&N|)y@0R<8J;Z?U-5c&wiP0C_w;Rq*a+x-ELrMdH=}^9`SrhoeK6dMxm*T6?+W@`4R-&!`;>w|N{4w}M zyJ5aE-It2R7%mnLzbDK%Xr9?N-&qzAxBXO-oO-R(Ie=;Uc(}sWr7kRbkI&bi5}^zf3iW$HeS5`xix!J9utTq z&uyV3<5i5qHDu9jfu_kvQF_I!-8(_+*mm1QjcIwm(`4WvFaIND#bD!PzAs356JqT> z(Ko@q?a&UZRSPSOvXd8=LSPJOiQlQlPv{-1T6N*nD5|xoH*ul26Fx$LtI;4L9?=OF z^2Lb8Mhm^&Stq>G2>Ic2>RPV6VnMbfdLsXrhE`zMRpW zkNKA2ggKpY?2d{t%)65d^MsHrOcBcoZ-2SlUZ<9lAK9yoR+kO6M5f^@vLGCCV^7}x zRVxD~>9VtC5q7IMJsK`S(g$QTG6HgaNR1A$5tBa+^kNf!H_%Y99Mgs0bC6}CA`HI0 zGZVszs9_7y`3{NX5!;P(E(@I{;+=c1$W{H`VDc7}>(b-SuHzKlrxwTLF<*b03H(Df z>7<3RO*lijYZZwbBwRDiYsoKZIAW9dFa}KNoLSkCV){De<2gB}Y9D3*IqcOYcIBAPZP$G3zSILj4>&zfD_XIEHZfP^5CEjM4SK6beHDTX2 z74?aSiC1A}t>qFK8}j~8Z4GZTSM^(GZ{@-cvT*tLoXXGW@&%_D^7`>ZeWv{>?w)>+ zJpEV1Uh0*Czcz=Bo(jYP{HS^S$B1)zzG>#7=ou*juy>t*8`#Q0BmlkqnM%*;A34@_ z)mtuSRd_m#x@Yd7L`VhGCLVfKrnD-pSjYuOd1>9(qO)yIyR>D$>wvso-BUR;zNNQ% z2YsPagui1|_34<-ERpdnR}VN!%l2` z0C4_mRWgGqGPmN$Sjp0u`HMB;K2h_{M4qZ4&0FMPl4+{V$0coUu;=tK!z5))3MPEY zGX>LH0oqiGLR+%uc#o=KV!jba^(#d}{RNGRx?9Ltw|VgH8;w#5`)3B5ra5%8>f{QILYmIz}Lg}5oyCA9N8AK63h&(*0Uuic9wQwXaAq8TC6NUBU zFzYUna3aL120oB!X%G{x(7w}8uV`37(bk{`1+4jSLY}kWT5k-dB6=x)xlCv)LO&lW zWq8)%D0LQihyXXpWdc%qe|cFj4{#(2!(4xA=aH4fn41AK7D$|14og%w$~QgZKG{}1 z-VeDuCJ*${;sx=#V1FR%Pt-uFsP$H!oY6@CFom*R7MQ0|6yDeVgGL*Fbo(|5+El=+BU&W)iPBXKrVau)E3%N^=U z{xSNzU1mpo#VhV6e-2Si2oKWxQ>g+JWj_D56igkj`1QdU^+e*}k*v+K*Pofy8bIPc zfgryEbhC3R5sclS_;X(o0E?i{V=M5(&x^kyD6=0FURA*xz{GjVJ^Em_5GCSfz5JFz 
zn*5BtfRq+&rS<#=;1lnK56E5y7Ugik0B0o9rW?ZvD+R&QT6=Q$ah!5Aa&mKsw7|AG zR_2cmScRK-&wM|)zLc(jvn)gWMptjPF=-7C)QX|P7v*q&MF3y~UH**I71zwQ5 z`>q0cwrlsGqi_%f+1EyPTC6W9xEC+*18qp@-!?KI{mI-|EUTxA;d#5U`;cg&=(r`B zBb3T$X^*ZOLpAL1{2>@?lWU?T>{>E!4UVNIIr+;dQZ+!cAt-}a+7QE|oK@@hbWwLF zoSa+wV7qb?njpIucU5TV1wP#<(oaHSE=3FzrNqKktsSg3Tp0%U0?5{sm z6dMWUP%ZCh>G`%EQCMDwDr(Pj5-A;C5!KWu2^|@%H+Amvvu@8PC=^0Zwo7yG6ty$S zJ`W-EFwuZoOC;$FCb^7HQOq;C&Q7Xym=L^fF*xv7IF`++rrR6#!2xCW&ciP<#u$BQ zoXX>Ec+#@F%X-+`pqE*LhtsEd+ISW0UE@fwZ8_HVKSjRlvwk`gN~dt>v{x?DH9-O3 zc8IkM--gtS9~)Ft)k`8op7oBuaS5GcOk^Ym_QTNadq@dDwFm9s%jq5Lby9GnHo8Zn z`d9)eycC}Y5OH}vPW-m(kGk~lZDf+z&s2ClmQf9Zz0m9JWII?B#cm^Xj2(yzCmilJ z=l%#iB0d+WrviU;cqJ6?eDBJ zF=Bzc(qb^XfTFJR{35>&?toy1yAxCH-WYz#!~T}Ae{SI|tzt6h93I>`bA>A;%R=!j zS7doX-7`%!bU-*|g>S|vv}R!ZsfWS%Zq$T-eHNY&Iw4T6!O;?&4a(EyR~fgrP~5~sV~dh$AnUyy%>Eh>E`wo}t-A@ln>rhOQofyche8O4y8am~qdKGP=o9n(v#%OkGQbB`{X;G%V3lP}B@lB^lc%^8XasV zgf*2+$wWae*0j@CrBVvSlk;jht5a+SaRB884l|^|npp?o%Eg(J5(;@JzBRwQp6bC! zIpO(WKC71wskNo?)ydO(1TlOsl5XDMI4pN^R|*hO5u?kc*VD9^BBNgfe)K1QZq#es zE0$ovOBEP&&uJkufLb}#79Fhud2dB>e_1Pzk6S_xQdW zxr=73NdC@PIrd(gQ>6oG?W&={re;or?P>L%94|VspW=(flReF7D$0=g{I*m5T7o8Tde$yuvQ#xzm<3vYG#6r_QS` z*2s+te9#OrJRhO%%9HyCqCx+iT;8JujPtf8C+=;iijmi2yma2YJSP@mkkbh_+l|Kf zGoxqwlL5q;(vbNATXWs77eK(HvV_r?-rl5J_UqBcp&X=3>r34;0HtreT2h4 zh@!{j(o6#mwNR?jULX)1W&zZ#20=>ZIPx;I7wy|qi#zNE9hGLMfq}p#l^2SL#%;;m z$^<2cRm)(};!^S*ohy0;#S&?X_IIu=A|5cxF>Y2d=-qjC5kl$mh{u+OS z1vvgUy2m%JKq|l7{Gm7%^=33a!8)K19|9*Qio`vwh+PWqM6as))5;JON}(9C8a2~i zxbh~qcKrpfoT}c@mp;+OA>Voif7J{yIVWWm^y8uTQ=Jd)D$ZW;v^~=3mpn0LM;P7F ztd&)P^eZ@pEa|cU6Rt^TC;C7t(hNCJ+l^N6${tO0=vRBVj`%;Y2#%@p$e#|NmD?F` zNRMwr{CX(J3+bt>{lSxIb8wSJDLAL;eU<)krRzT7vUFz#Y)8z4W*j*uj-8jHc3}0x zX18LhMKn;=qODZ9#*L&Qs^gnDJ>$8*(7DRFeeydS+@M@O*CQiWox3f}|&al|N;Mel{7hcv0 zz7}hg#vutIZk4yh8B~WpJCh2ao4ttS(01oz#tJVm1@*B>#!5K2mbnzU+`tj+Lf$u3 zns&7fyDQC@)ni-Qne%{ap!8wMx?Zr=-uG0X>(KxvqH|MZ{3lt4pD$A~U8v)y@=elN zf30Ysp!R^zAn#8rmA=6oRLHC;>wkFQUGpmwhH+EriD-N)>NLZW;SI-V5%7Nj8Q7=o 
z>9c%#foI={%6?6JH@g3@BdO^ta?@(^9QZar04*oz3CbHUQJLEA+?|zQRQ(Xq_XRr+ z)I1Ef6<8EkYm9`b_Dnx$ay2jTi*a;SUuWyT4_`^eok4r0PJLzBSt{e%(D6{rlY|rq zawK{d9PjEfRtq)fFl$nOmGjqEc=>+-K|sF0^X=9wWkS|yfZrkR6x*TX_)ykZY*2CA z5f-ZjFEeWio~Jln_9xjGAt!ZVqJwv-0AhjZ%7(E{%g0pad`cBMLm685 zFKs`5)1ZV96;bQrE=?W-5)kzt2ge^AexdO{C=A5?k%?XZmS<(fUYgBFfa?^TWl|Q+mO?% zjBitFg!r5u=l2?UnDX)Abj0LpnF&lV@cbd+j8(_Q=~dk>cu94(SNR;pom%zMxh?Wf zXoyYn?~)URmwn&H>tc$8G;7lD+G2@9m$3qMfgBbYNgx8W}7^&TlL1Jnl9@MFJ13|qf|-dd-(fN z9da1P?e3a|Ti|!K*DrvJezrn~NECNbbp(h+mG3(^BJ(uyb5HK$VWM_CR$Bg&{Jm4$+$*nZRpdJAANbb7Y?U|KjBhub zmR=m(!jBPtu?e#=8KXioR(tDWsv#Ra483aCFnmR;yhojk_9TE)C= z%RrudbsHRz+%H74hNKo;YVapi_kQ6;b1grj@_`##DKeIY@qDu+v?Vj4qx zZR=;z+RZI}GfVDTPZ2KQR~g|dVQd}|8U?|&^sS`G*p}@;cE@h5=l^2!jm^wv-%KJj z%509G_hoeB)62`N5X8OL{?6=A|6|c+^wr2%_7y^N{|`XR!i3G3sQ{w3SIk+;P!P^; zBHB((MB62b*z`Hd*n}cS5r%v?{sFT;K#4=D9vmtNv50Amfub>yQBDBHZFpl>%0KQ> z0P#Lh`ZK6X@DAmd(%H=_v9k8RXnQeW7N zsr3NNylAXSmZ}*ZqI_YIT!VA3p)4r=%PdQu0$A06E|*VWkZsDo4yfI*a@Cg^COxFI zhwvzSR2@pgdjW~f8;;Cdm zc3}4TY%nhiP?tc>Q)%`RFkq*~J2XE(o>X#&R@MOF>Sdl`c=(`d&xGyD&S(8Gl!Tvb zq*|m&2DoM!bm9J*U%WsyC9hEJeou6wT9%!m(@6sYQ6@4kyp-rsA$*VwqaSp+eXFXm_%52gCMc&?c3Qx}@b0 zKqOjS^`Sz`f&7FDkxYu6*Yn?7;+67g@cP$KA<{q&%kO1->QoQp=Qy#RU#ELHj*2J- zl3b@lappTY6w9F%N#Teq33c6_7?bs~ZKq()hEwBSdEt_VPH48Hw%9rXm ziU77tlbcEN98Al=h+vCfR%FQGjJ?S0eswSp_S7qHIb^dbc1eNsJ#|QlpS?`U?44l( zg1J`ZY8Q$Vv~yP(Q`id#`It%-+BFzls%wH*c|E|ptWN)%jdPB)yl4DJKm$X>E*4n_tY_)bwDn{d_V_`_FafN!5MU><$jo$j1A@v*3vEDOpPF!Jx8}%F480 znIa}~KjSdqcTqa19zSP##m(?9zC=?;NfXr4Q2nSE`rNCSLphG2;=+e4CJ(UffjIfR zmET?Qv;A$wdBCy<)PXo1E4+j`^5;4nvWYM&@jcD-d`{_-?{=b(s3nGGp$ed~sJ>oY z#GRM9pTgA3-(XCWyEQMk-y$DSkuB8FVta6(NDla;Dj6ftnHRpyN{VzwTFx-7o=^hy zAGV*Ih#9t5SqLueZpt$OQ=Gq->jU0*%Ecu49hEYsjF)V&#dMI&`wsCC&B{pE#rHb> zY_Y-QqbeZmpLTb$LN5cun81WgnC<6vk-_)#qu}{On_Vxyv?}k=px{Ym;de4trO*sN zpOgJR4O|JZ%gDS3*HBFtt;F!MgEc?=H7(^igp#+^CZLx~w>A0;lvr{;_kwhm1fmJUtT3UG;t=FcvIgFelt#mv~g*;@6Uvg*--#S61g%;aOs8!Fd9#|7EGH4aZMa8*RucyDa2&sQh;qKQB 
z>hyGCH$|2w8wy{^DKl3*1O;-NCRc!VKGLo7cRFDDWB}1!7U`I!y3ku?gEPLco9s@} zXcSekPlfWd{XyeZ!+wdOF_^7WW_GDw=6j4mw4hfnWb!~*m|>cULojOMs$uc~bmGwf z+@dkn5vneeDwD^8g^Gp-W3o(d<{7+^&dYF7TydA>hZn(5jdr6IXztyXIK-lrjB=J| z^%i-TSnA2Yv@tj!wp-KqUtPQ(<3l581!b_)Xb;NSFQz++e9gd91=AeyKd#(jLu0<- zK*p;{mzUgO$t|N)=LwU-;rnz7!8@ZB5vxGM+hdjZ^wHFFfM{(3f-218ymgFi)~&3>3XDx!3{Gv zPzbFE8}qGZ>T+tCiCkJ@*DTscV=aZk%B7OPwJ+8-la6cNSyggc-|Qt*%Uow!!GF`N zCF6hO#fPj~I^!Z|?IB>ev%QYK)yvD6+pbF|9sb3d^%Q0EI>xMCI-~sCf2J;@uWqZ6 zd?j`H_iE7izfzY)NR{?f**_HEzs806R)!i&xK>u$7ic#IbNr_lFH_3ZeB%;AtsNpZ zZ8Ch`Ww0YlsdUBsR-VU{?NknB=EYEC(A|9z^hyjYyogQ6b2#iC<^#p+mMZ);1_j%b z(uJ2cV`<88vON`!g<5tu|sp0tz$z{ z42Nk{KMwJT8h2~WT03ZJz=ip(A_~~lyJ;5%U>tNrpK9Q+@I_W81&gam1Hg?s0hf!e z)%%l#7F3=O4ZY5w)NP!@N8eqS%XOtM&z>u@e9K|s&&BgTOPKk-;%uwdBeZ4P;`MPV zMT=}YD1QhR{WeNpb!rwI{yaNCE32NMm-~tc^zsqdBm>u$^C}`EQ1&J%-*~xRx`|L~ zRy})_^=CPB~*6IhT1^1e4}3R3xfhPKesJ#X%P|zL8F6V@ZbwsGqy+2L!|8|d+Fw2m|VbU0qY&K z-NgrreF1bNq8J-f>>@_%=H`!ivge4g?dN!iWg~I-Z8zF6f zvdBSz5x-j#7VbXEBBHVm)i6(D?j^9C+1pTH_reOsH`^X0(+_DOR`!--U4sq)hmkG0 zMq#`Rfz;z)+G3QTMWT+Uk%`_5CQlMe`|+iQF;v8lssg&%2JQUKhEMqmTO48e$$TnF zpAM4-=?JW-7Q}7get3c zbIEH@m;jPqFQwi2t6|Sm>&X?sTA=s!y6U{jz>DsakR_r8N~1riTD{4Wv{J=SDfSj0 zR)&;tNbnYVp+}<_w1bm=>WCex@=Qvbog|^x)4g>se4?3*r=@-H}hombG$_JWQ8gY^z%Cc~skI$m1p6d6Zxm+2tbl zM&mY3+}J7avG8868BCv}xVLMx=qA?*Z=W*DrYgkdAh=xQ&O}>u?G%+-w-c2wTIF3- z*d}dt9_`&w4^CBi)%L@WVT3y?l%cQM;)j}C-OjkJ^Nl#-%ht-_8eSh0;xnD;A1m$x zDc)rA9T?u(4&MYFG)nW5ueZt3821G#CK+hi%SU507GZ9uuQM^3xuZ4F(JA{oAe57t zLz&I!8Bo~A5T;E~N6+Jo+iX8wM>sc1<5S@WzhH{1s`4o4PSs)BIJF*rV0igQc#v^I zkxvVzV+2RiqC9l|j+l+l$?`={w!}!dw;xmF_&P(Oqef`mct?3Xko1yvXAyjr1wy=E zm46}k?i0hVhj_le7CEa5fj>j#qqTbIcY4tRRo(>TJXXVdx;vTT7SE4YKoexk6{dQ< z_55f}JfO+BHsgOa)n(yeci_VIr)$Rfw zAA```XHyoVI8qbm5BH1eFh#yN+#V>xj||?c$zv-Rv6mUu=pC*JuT?%joKc)qy#VvH zI?N-B@c)#hB=0NYrv^p*oN~`67oK&t3zz3hm4emU`$|vtS3``f$aYXwVwO8!oMn0m z*ervv70)xvUC+IE1oZA(c(&ha+JmZgxzXD=+Z11~$`@&X9MuW;TfuUye1EH8NZy1<%)~nVcm~np+tp3^BXQ6^ua%8nk}} 
zA5e!vsw@JMnY&~On?^frTtvHN%qnYZ2Qpl)zsNsOhO&YdH^j{Xwv0Bmpv6n%6_3ly z%{K@|X*C@nq?UqHmX&!_NNzLT3TL!jLV7W1j_mI2X0`kF>OdA=Qzk8kBK(n-K?8bd zp+DU4f{GHui`9Y7t$N|L3Ta8&tVMC$m4F{KSd2waDFLaxD00JQs&~~LA&lJ7n=$k!vl0@IvY$I*D8E7 z9#tihxSZj?d($IqYDza#?=CW&Xo;FE|_B_$-Jxu~hvE<__1# z292E{J2{0Y@6kEhzGgOYol>3q8sZAp6RkJB^iXuNdwqZgc#6IBhWUEEq1n-4Q3)vdf`0pBmYN81EgtXTtbuu%jOKoC{$UT~`6^Xd9NX&-(q$Q2Lo;Vaa-oCOdhW}_3Tr;5iIYGFeNi6HQ(Ftt75+E2Up@= zdmGNBY_O4iZiow7{rClt+F2_4#l6ZD|6|G)$)MHE35_gGEU}j%KQ$EDj;_q8=(SDv z(N~yyFIO-b4Ghoi4yad^+t_yXu68fc40#3ZJ8^}{U&nYHsdZz9$SDAYVUA6|#c!ot zNCi4$Y}>`pgB(9?(G|W^C#iXhcc+7{`~p*hqlKkG55AWJIT8#JEbnWa!TRf}EYg@=eRB=S`UFl_gww!~O@GV9y z=e2p+RLFgW$*j#r!}*r@+A!&ICT}+AfbF!(al_?{7IV&0?ZvEY@i6t|TNwA}yHu66 zpp6^i7oBoD4RUm`WtI(Ia!l0^J}?zX2VMAh%?tLI^6(BOmn*=qd@IOAn!DYfB+XLPCSxhMfa%Q*HJWP1`n#JVBUG{_^jPJvgpQFet zTA4G2Icvd=zvvXNSEz!o7Ql={gKewfpMG!=9I{_}sqm3MV5r}FYbkg z;U_CyCeNTXDh-w1k)nsE*fwU&o(8|l^yB??=6q_1XH<7jL-;j$0p8{}90!Cr;IfK5 zz0TwT@HYXaqD{(MJVw*pst4ze{#rt=z$U`tA;;E(=oMrjZR7}l#d#O}|T9_ff4+Ya`BaA<) zDkhDTD%k&Ehm^u0=;uqBEV>vE+X#}|bzyTq{jR+;?1#Bfsi%xGKAZ)5T@(-}y!^SZp?eoY>M16H?LLqWR00?A2eJ^pmvp*pI6 z!T%en$vq&C|3Z(X|I)=CTsAP0Q%tA&MBPhimY$9iCbx>yRd<0U4yi$nDm?Fj;j80{ zUXEcsrd^dQVJt@nL7dHZnjtx0R!m{Hv%U89V6U^mWvk17_2FOh%3U~!%MO{1Bb_bu zH-fcQE2dt76K1cN3Xr2;fV@Lit(XMlJ9sr3z33)FKB{9|-uxG?IW``%`oFC448FPt zTJqJ^1lj)ya6z22on|XzW8ySQapWL5y9c(sKg2(V67bx(s4+*e29pLf`_qcKNg2w+ zIZ9uOKhdg#P0BzNFIW0wcsN5uP-O%S8e||!&>Y}czt}{}6fnKWg5g$UL)@#nOp%u{ zXCNx6D}p5m=8(#>2NKGh(S{sxK~-*;j&Lgh@BLA{5qGC#Yl^`zsm;UOq)j(n4ER72 z1S(|C5C1}CEu%GDDx3npc!4^uNUy~)Q`q>3O?y!D@&`2-X-sCiOw_J>FtvU#g zE~)UxY(JYr3Gqn9L1@X}01Q5E(;hAd>jJ$_M?DI{IDtS3K-ZIhY4DdU8L2AtEPt~k zwG0jDe)2&-QU=4usAhim-+O2*-P#r&sLQ=+;o&o?xntM}Co$p2SCw04s1yo5|)M75wgmLi=NtPh|rk zu&X1{>J2WEp*@FYLowx`}s_PQS5fWsrv?L!Dd|7B1Tt1GYM5HZd0}E*-4r>96Ucq zVRD6H_h#3a{GW!LB^aO1>_i<5vU?q|6f*K1LxCls!N1qxw@W|z8(fq8VGV4O87*G; zCUn7Z8o=#pV$Q)Zf=MhK147E2xsDU5Ws^9hLmDj*$*>d>+wjtHuAfk>;B=d|IAG$@ zc8RCqi^-I^_`QuLe+txcI`Omz>S90X2X|shO@U$vs9{Ts{COv<0T5vRnoTg>YgL+c 
zFlG6fD*uToJ>bql%WnR@R`}_wAm7goyF6ua#fy*CN0LU;s+!1%^eiV^pfy<|`o@81@!lKJZ z+Uf73F^|u&F+T-ii3sl%Uu}pkIThV zSD4*!2dX1`Yrz;B8m2tt3RC87s*{#Wb|7}6d;JJXgkRTEZfP-0I9y&r`KBG2jrm`d zZ=o-v+sgGFmz!c6TRz_gvvfMq=I31OHiE*_a$k=+?sOHJwMAZQFnKu)27dF|)dAK+ZC(eclZ897nLzcbPDqil9FI37!o{n8T9NerLa)%x! zFQ_>9*$u5E_u&fvv2Zq1z4vDv7b(T{*m9NGr=8-~4nF|X%Qu+`Bn$I!KHnNtBhV_5 zEGFzCBm+fV{xb5@jfXsc->+W6n}HD_y#j7})eoA+h(4p{6|ucL4FiW!t8 zowH(>gFC%RPeUm5(H|xIy)xNqdZTwXvYfb;#0?#Isg-Jp|wU&55F)6GD>)Mwm z|BkEmq|24D1;~mbn0*;I3dX4i?A0GZ3yw?3(;N2YI zvi;>TRPBd@YGFW;-RdWBv`ABf<=&tS9nsa0ELnbJDC)xDJfE3Z`iwpIh%5}RV!}Q<(u%w=9Zgo zT+C?mMoNb^cMYVCe!7FFRXG}F|5Td$ID#UTr&9c=loXftzJc&a9Zxie_@f@(qI<<< zrk{OJR~(H~P#!JG>1uPc+R)c%hm)16_?Iqc0lkWqw58B*biBSiz(G&UHsHn$gwgkP zFBqZuAvCDGlv%y#Ccds*2t}U;Fr0O)Y>+*UI$qX;(R!U~hBng1B2l9vR!BeVwbAqARLCaU z&ak|op_Y+S*$#S%_mEz;N(BtUKQJk*78V=1hk5Mhbo=j7t{9#xMKOc>Xvd4GWVs7i zvfxc74cb5JOw0MRHc;e8yW$v4KG#aIKij1pqpz2g38PXw$nHR1JY$KE4EZq!!`@EY z&K8mCyD$iA5YVdT&XjZ-0M|vrs<`|mX`w}tK3%i3nS-&`^ zqR1_@Edd&`{oxI6RH~e!tzA`EblDDOh^L!WEYPM`K9}Tpz@FRHsV0Yd%%8Iq`LJ!n z2|c$W+To|20S5xw0*=ZQlMhj4nJO32c7?uHNh=c5sg{0hh}W3=kS%sI=7RLWjr+{sg zzEBa1O+TI?6dtNXPY-CE!NJ;(qV9|~ebYJLN6LTnGX8)KCL|&|cbIs-LM4>tZYSBj zT6eZm#ZxPC+b-br>!x_9O-^E}bG+-URO`uonqUUrOm&tSd`C8rL~ASJ-4=P2&G-l# zcO4(I8L&HhcK|wWLx-#Zz9z645hX*&i_J=DfGqPBhID8XkmU-$oVi~}akM54t~z@d zBy;b2-Ni$0_55yH!bdQoa>;vJNeiP2-1+5k=>BBbH4L|v?6FOnNNkViz zT%mqfUenp#{1H8BQ!E`$ECtKrMloOfzrnIYXty)k32k|q(p=SC(b?oQqOXg*jxlqV zQrZ$oHtAHVSZgxeobq3UYd8T%PNm4(YD#`a>sL`A4P@3lWX_5*DmUjpgJs!Q)16WJ z)lF*S|MJ0rCYZ%RgMwwW2UHoE>Z7v%otp~`dIDMjnm08!qtu(-6K+yG{C_BkuPqa! zRQV7A)H2Y^5N$p&E)6JzO@D%aoK6FJP}B_~xd~R|T98H#UpsjVU;&%ml1tT!%axyS zE#NQ{^?A`^HZD%cqJYe3alqLsg}TX2p@ba>tDrpOBG^hQA74aeaJz&J+CaA!UtI1^ z#+*?U73oQr;uTV|HLGZciur1@-lbeyN^2(iqWt1=!`vLkmXt7Q>av8tpv4SJi7J%g z(GrE9q7FojNsKYw};TC(>)Q59hD-{xlp? 
z{j5Tf=n64LPfS5JS0BovDd?mlKqGPT9lflqPyR_;-onqb>M^dBO)=qx5=U?t1zZ+0 zU$Y0|d@3I0*Sb8@V(2Bu{f%A=-hdr?gR1$@q(wQu!S(J3Hm);?2b~suAPJzvpc;nK zMxa9B(HtMC(Yi?4f}4ivIO3~8%gaY<%{7PUHMA@>yzCOt7i|nS*G9u>s+bGc=T)*- zuTwCIn##&93P}0kWZHP{KHQ7qh#yhS*hu`ApVylHW?+`LHScCJv<7!{XRV4CWr$#DNyUUVRvwbB^NQJS=Tq2 zdGQ&l{0MxL2jz0h$xO8Ben_<^VQ2wYtCyMlw-~r*Xr*nN&gUrg#3r2~+WSid@mQPV6}U_~iuM~JvzuOSdJzn1 z2xn1h25yb9raF3gJyh5Onp02P9S`#kzAW8(yJnv2uPxo-+os&sF6TIJHhGZ&>$xZR z@BrNz$ilTMya9QoU^Q?U2Plj#H~YOpV1|t*EI6kkZm-HUDpa?@ImWx#-SEIRe$hco zY?ha)(c?6+MwMxWQP{ZI-~}Lu_&CD6GL(9pMj1gVj{{*A4)@ca!^E7amYm^mFFzBY zum%eKP{$rj8dsX#0c^lq@Qz=s*g(mou@4tNY4O8{z+jKk_63a^`U&(wcH$?(2~{+0 zOU{q%Z$qb!v8{1Q<0q!;rxa(zydV_F@nFfv6qkoU)1%~%+M?#jmn_CVHy~qGSdU@4 zJjqtLX`mZ%%CWS1sKrm$;^|JN4?CbCyTFnY@Va@k)7G2fSWSK#1Pf(ICZ;$>6MGx- zOqI#M(Z*o!nBqlC{<1@L1g8)}ajZrrC+>C>-mc?}p@b5ZU&3{`!Jw2Kb>hFIkWzL+ z{)6{S!q=pcykTTBvPrGFB1h2n7QyMN7@^5$X|MSE%I&FeKxE0~2BQU^&S~l(Z5QMPuMP8y zO_;#1wpvi>hFH=mpX@?UzeN_48ou2j52Np9wd(d6YP0G0RmpG)#-}{xXCBlIbDnes ztoX}dUuda#0fj%mNg=0$ov}gVa&T3b1M*H(ns@0d6%Re`$E_WT{E|avliPGL$CJTNU>soD+&5bBTjG`ccw3SkoBU{bVTJLty6A+= zHoV|O+t1HZ$&W|xpWEXlUm7y)Vtj3z{U#esD5!b@4x!lfIbCFvX0>?v*G%~!)hDc{ zCAz?q-F;G}pzcnL5E>|Z;HQ1GrnJwttER`t*z$Y}F2S7+)F6(zLl<}U`cYAV7Wf|% zx3)YylH3HB;C9`npI)?=As?V`H+9iPIj71!bcZ~;-SU&g!>WF;e+QXbyG|nTks^(4 z7*!u?eB}DK~NqHEPpq-THV@U?0^K$FWQB-&36 z1ELqJd~~~|3dEwQPakD9DIb#nQz}zfFOI4K->8&1j4!1fH00#w(jkoWK$uKsW0T|~rMI!P zbe~~U%KYZIgwkP9IQBr8O<`kWfSlY+akAu6W)9HPdc`JMaEfE&^2-9S;@!!pazF2J z3I1*l#PEN~6Qo$OoUejmKyV|1#srZbCd6=j8{GpePhco9q$Y=*1_pexg;C!ZN*%z_HE{P->% z_P24oCLpNPkesDj10n5$5*KUBO_lxdSWsOOzRtSS1#0Q<&88J;w0`n+m>o)5WVh39 z{q%`53MP{A9~Z@^)93!)HZdN4ahBEmf>jSjo%Z7Ji+f2wP&~Rg0v_Hplj^Q8Pfk;v zr`58J0-70Bp5Z@8uNi~TZWBz3Qly6ki)2w_gZA!l0T6K(_DDRt_@ks1Zm zS_p+9JFdYgC+No*S3avOxk@NBhH;h@ML*WI19ga+nw1K1cgSl$T~(YTXxYSI5ZqqnlTCShwLX{z z@F5akqk19OIDV}HH>0wgDq$4v6dTNc!ui3cEC`BEXyXbv+&alb}Lw;0zcB_u&QPmRHfJzWNG0r@rFs5 zP5YhL?gO@fIyk7xosgV}XefkfwN+q>AyeE+w+Z 
zSHwB!_r(;mUR=uO^Qw4H`0*I}zSLNoT#M{-CcZG_E&|W1;wssN6)$xRQjh&XQ&~TS7ZvvGiFI#y0fjsDT#93+?2dSmo zQrTip_-*cG-A);lb+#pnabL2!vw1dUPbtAc;Snzp+|Pc=X?}AGm;8gv?Oh1fv2ZW* z6q9f=CdL<-m)lc~i91RYe;)2fn1y_Nxt3=X z0Bvo$JX&LXn>ti~y_nwZVIKHtNP)U8vo{^rC1!W}!NLk{)@8ebdqHN&zd|}S!>Km9 zDi7BwZ0@X5;pV#|;2rJ=pX72=;iKyUEz?Z#{SNs-3*&P-DUC~+h+%$CMGxWM_|`}& ze5Ct^Gdaz)>k(+~S5n@mk>PXd(f4&+S5_p0OyjRRGI99``4L27v`sZu(J^-ATg_1B zD%I)5+1!-IGV|Kg*}wUhgbO=LmF_= zDjgVis4Fqbmh*wk*>82Ly2AHjOsF$d#&_)`S2iA@b!Q-RIK}dYZRm^N^yH_iqSvPHb;u?3WrZG1967371IEuA#(OlmX&Z%tvp08OEGIFm zx@jBnfiLILcV%~lQ<+@DOZRCBXU>j-c3@9%iK`9y2ioUuR#^*u4HvHC13#Iw&@3yV zcjc8OzMKisX_RGSX_C%&x31*8f?{7wr=Im?OlvTSzh;+}UWd$~lKr@E&hlL@ayGAD zy*1a^q%r)BPMAZ1w2m>)ET6IPi!Fz&UOM%n+7`mFi%NXMRU)b^1C1S09vQte=ac{7 zujs3v28e?H&0ixD`Z9p^U+60hV_L^q?ubfun0{L06EDlp>1r-I9Lx%PJcm6S4^W-h3RYsii1kh zJdnsxk`OlGcxxb{inL}-jG|W1JN7P7z@{yRrTdt|Zt!fEQeE~^iqH};HN!0f^jXca zW<8orcgP^!>)1d2F!GhWA%qz;GvmN0&JW#3(%os1ETqrz5=qqQV z(wodzIV|&J82qe*>a;;To8SR%`gerk=<-bb?hgte&b12b%ZtmIvq`A~dB*1GNcg_c z@oUPOrRZY?4xii~G=Nj>-tLH9O zu1o8o6f!|pJ#3@M9X!P0w6`WySx-vK!*0|404kWTQOVUzhdHz!tJ!LynVyJL_7A(*&I9^h2qcQn% zg`RLn_}#k*{9wGiXFV9E(pT~1Q|^K2ZU0v?nRsgIT3cyfhb08Atqj`yZ6biu+8wfpfC6xs`$ zo20EbVU+y%HmY^fX^(tL=4(cCe}UI4RJ;6%L2D-;z{sp}@d7bsLUg|6r+=ZC!ARO- zCVsQEvc;exQ9h-WNGcibl-VT8A5%Kfs>8FR8*Sl4J}@NCD{ESed{8jKAYbr(F;VEF!zu7Ru_A8Q%cT*~7K7r!Kj-`kNQ1+{QKLw;RUfs*r6i$HKclrKLh^k$ zc_#Li!yx9A5I({7i*0bx1uEsFZuY!*|I!D*IS*=-$J*Hx2SBQKhLG)0aR#ymyo~hj z4)8Fe5_HV}8#~1X6&QC#-dlC5`+D6`pDoL>-9KxjhUocj?DPxz>PXr}r4-R>Riv2P zbd9|^Jz32WpBVCMz{}rAV9jyuUi_d&kw;po;<%q0RI|4k(sYo{7ekNYaUZqi-kqv* z2W&J|8(wDeepikFX-ybu_hu7DiWe*L zGmKt*l1notda*)$zZ+b_a0Pw7_)Z5(R_@lxj<;%rD$*{{gN~_wQIIQYt zaDwH}TAX?Y^Y)IAy`vew!y&XhTI&vm)nr~FHpTg)HGU<0mNqEUo?c7=TwgRsLD$m0 zD;ruUCz@@8b8h5vJ9}$VCf}rVr+XQ4ECBKlhXVA`FQs_flovoTu2re#q=2hNx^2-& z1|@=y-=!$nRAt-l^dY;dd!SOk@EUFP*bI~L*BC8~ z>@T-d3*c*yqt7Fmg8}V6J4+cypCMOMe8;)(C;Wxa7hOdF+(2CI-)snH{Ic?fdR(Qpj zS?T4hYSVItVk#4{Dm$R6sq}_^m*qL-V#n+pDgvgGf=$MVpfT4pFE{0NG=;_xdz4G_ 
z&QWETPVw?`VauD0zXi4VmdTt*b>3uySw39%iW#P#ZAZZ>+pvbH$vH%)g^luf&6&PStVBefE#GOmi7(9e|8sdF~BBt#df$< z9t7x#fIMJY*qEG6-}3~g^k>OiFg3JhG>hSJ=Yi*V@$s7cyTNFI^LCm$(ekL~kj5va z%q~?neuZYlz4&a4ATd5v~|a4QoR#%u2t+y z*+eu!yMk;Vq78J1=_UJ;zYVY}1OA^IWeWHof@f9C>%27x1g4%42W;*yQp+2kZD|K!B`WC`{%#7ClB8|z2!nt1V)O8h!tlsYkJ zZRLA`!}ZqY@(xWfJH7FjO9B?GXN>81bRWu~37R_}2r7*xZfQ)Ls5iB7cT zRc0xd<((;rNZ!m_lo9It$Fz&7%)M3OpMFA#WuOj? z$({y(lgWM?LtT0i^%&4R-IOueXDEVR{N|VlX_@pm*Mec_jejPnDJBsA6nBPiz|T@; zi&V=+gHg6GJ+-9JVoYV37~cN4nCF&XI9p+$JL}PfmC7NGq&!;uElvTH4zR}}CMg5| zHq}Yr%ssreT-(NYca=~toz>UiieERKWrj2XT9_h~b3ffToOqXg}RV3vT)DFdy!iA|D%W z^%nTO^DsX>x?L%=&o}~e3=WW_!r7O*><6?7MV#NN3*Z{LYqY@7-4AcZ;0Br?i;1nc z%7zv%c*}%+XZ)&er%k68vC}a5991!BhDpD>^F_@Sb*Hz{^5^7DlR|_cQS5VF=C}e4 zZt1XQYb|R+kt8!)XuY>gx|qbv*OPB_^ZPYFB>#~1>zKl*o&t|6ztN&fi_^X+RA8}( zA<11V%K3~#x^s9wBQd!Q4sor%*CM{~%1VwMTG2(R!Mj0taya_%$D-LFMrnRViyMWc zaQCCG2me<7wp`w=Tf3w&n`(N*E2C`1dB(Dr01=A3wL5v8Rqq?lKTw=opsb2TCa-kJ z<(%=Xl_tUcws=~V54%h*gFl!Tw>nt9a^$1B%<9>-s<*asu|rT<1ds@(rCG?qtS-GDaw>i%Zw|CyD{X^fpya3w(3 zt|!UFnm7}4;vL(b*x0dc+qP}n#>BR58#~sV`7ch@`On2y-%VfiRadRGx@vX5>wSz1 zm{QpCig8LU#`KS5vAO9AV;AODGE%|vB8-*66-NY?!Qp%`nueJGg2qVXL$}OG<=FAN zkZz3D%*Lf5Fg0Zw;D40l8lrY1U492T;HRDpy5vQE#TA&_|G`rOU=b}fMmyQix7E-i zj6cdXMXLG|(9>ClTm0-Q6evb0Qmax3Wx{^8XZ+G%-YZ!gpZVuu3f@S`!X>|ps?%aP z<{+&j#GM(v_(lpBBFZAlo_X}DC6_DFX?&hEhSvXJM+-+re}R%B7WK^Oq5Q+%5@!m;#` zI+a=0Mxa?ay>q0Se^9V-R{Dy>48!pJd2~8NT_Z7Erg|xz*w|4mM$Fx?-N&uhM6#}W zAsd_>v?@yFLP zexJLcvU4zN?*}o;k6AkP$|Pq3S19}3{rG;|+2sJfhjDwxG8N8WJ^ zR9^k(G>*CDA0SYSqIb5m*lBh~yyvCTQfHjWszu@XOREDf&e##_QEU!Z1OTZF z%`8~Xu0wNK`+kK+laN)p8|L#VOvYSEk&pvMR?+WgFL$|NW$yuFOBYIrTlBhDs+DNu zYIm?|9HnxSS?Bx3){8U_R5h ziC#gal{zlMrFct7kH)svC=?xTj}7B<=4Fp{rzkrsM>EFJUmxOax(~bt>x76-%E&9} z`(ZAexG~oEIMwzz%I>16$cUyNDM)Ie|&8+OP)?8~ysfTmv8+>mFJ z$3qhJSDD0hVJB8HjUjG|+J|InWC){AZnS>w_W3R1>{%yH1kX6z9Qlk>_!k@h^Vo*2 zCh?ehwMQRm&Ejs)6djgzbxHH=y#tKFy?2FU?KPEoOIg+!#%8&=Z_9+tW*Hodb+Kj5 zbpc6aG*mK^yzZc-@|~3^!3kTc(%=LFin`D7MqF|`n==>dR>eg3u4LC%^-V;G`Xd9% 
zA=|^0Y;p(vYJ9li#;A5e=)=59Ae%Oi6yrJ6gQGyX>9hF25J`od;UTO*IN$^vMxYhLW=*iL-VdJ$;=o z_d}lZ>Sn`U^6?Bn&o`5x8=5<>((HnO3rxM97c#p>W?!jj6fY#t+PTM}v5yAVR%SbH z?h%I6)k*Izf=ryLq7*jpt84sI^lgny59`WIfen!Bq+m@qG&T)r?M-t!U&hYBdQLUM ztS+5#2)If5`@QMqFX(ErHfZ8YVd`%!xDO8-=gLtiv!@`7w>_KG8k~6$aAG{7o*2$M zIoGma>wHc?SH8s7O^GLRxwy!xw0L|KkCNDYa!dVPX^L?0lwpIP1S}{JuAwFc;ul)gBA3l66E#btJ3R z+Y^?9e1A+q`j0ph3(()%D`EoM#v;R|YT%1Y&9p~H|4u0fIH2JjE4YWCJSb=|$#qH3 zU$QT)^!fU{*X(b(u>7Q2k`8&$o{Gc7T(Agu*JqJcBfsMNLSNeGf+|TfkCMVo#16)M zwY17J%Zqn;4Wr@;5^v}YSGE&_WNRWpA~~IH(NCPp_rr7+BP~-j{azQNN}jZ{W0pU( zVLaAvQ8)b4!hB-)Z85RDiu;eQr2)p3SuhlRy9zyGt(M+e_ZWDFo{yGlwn&mK@Y~Y8 z>Fiv(MdKuo1ZLURg9^{M>?}@@vlUUknIwwFLp%zFa<+37L2L^Zv0x>_k+Q?fUFLrD ztMS8JIkc720f2G#o~?V5yZ?_O=Xg%YDq=|Q(|n+lo;EZ7vXa*H_;>9Gwe^hsA0zEq z3MyKzY~3HIp;^=8q6EfOzv$RyH&jWPBtkg1&M>Mhe%=#HzNfra<)>B5(HD}Oc)fz5o1jw#gLkWSBDQRfs;)!LkceF~Ac z1?+wnH7rt9t>@+u*E?$ud&%sN@HmpUrPaI#-fpj$9KWmtUVQRskh&DqMZ>;bP163A zbbjDYFwMu_->s0Q6@w?xc<{I*59RaAlypBWnyCLJ(D$|U{ls>=qo8J|M6_>9rB;1% z+)LJuli6PRh2zDsgKDO6{JS4>whRn7hn9hUunVW*bdDHvIrX*uVdbs85=H?ILg7Q~ zWusJr(>P+~vu+67A~4xvem7W;Zd(*2SB2L*9OB%9;3?A^VdIb5{%?(uvQ)BYyqh zvh_B55tWZ}$?!2I-xahOi#w*#Nd|HIr~Rg&N-zu0(Wgcl>2S9Vg zSR=Qp+Bx5#z@3#ut$u*PI;zixXrSg^*_ftMUBCRM^qUlVR$Odvle3K9S?R+Cox!6e zR9w7XR5vXXf7Au{n1ieD5+F6&zig5}T{`@fv3-#WKPZx$z#E=ZD}E|qMfH*0 zpuSue>fbnW5@)}40Apw(#sDw|3gJ$CBWfZkVt)(U54bq2G2hEqk7TyD4TA>Q8ITjE@XilB@pTv=_wPEXAD-rp zuBjr^(@e&Vb%44mpGuvplM^hrx$D_|M1R0LFm$wKFnr4%QRc{$oftF6c`8g@+~=lCyd2Z0d(73BG`YGCtyFT>p@LSM zgMaM5!rjatnL;@}u~9O1<_{H`J+@=~Iw0VXEDWWY#@J#(u^ielKML$j-BTbzT8SH3 zuHM#Lw4;YS`ju;c?>*jh(U_7NeUZQ}d3?DeoxrDtlC2Vl0g*pz<8ddaF&Hv^+;Og^ z$%tStTr3((kx<&g+_w@w+XM0kl4Ae03FS-%V+mj|Gk$)g)2X%k-09$t&iVKY@xRHe ze>8$bXTiRG%Lo52$gCXyiOl*xL|?#VQetivzG6}aPJ%{~=H>sT$v*u5aNmP_DjC@+ zDOT1n_Bv<_Ll<}?Fls|%J@g-6=6$VeQ3utqAO}*+p$Nr;4WVmkIJdj{hUtBFBIOXh?k&ba=^d+vk~>ae><+63M-=f<=#)QJ`6lri<# zpY^C$t~h2>w$yO4caJb>kSV}WLaeOg@+wO#VeL|??>qp@5l0db;!yR@iAd(&j_y~1 zogQOr!k!e$X}aYKfB=?JAMZ@G(;n$(jU=|y?lP5rIi?SAX6iVm_i;AXIHvb=%7nH{ 
z>*1BCU9?9T|2(OiVmIwD>mPKw;!2GtF{~sGWYT2PyP*oy!!28s%t*%u7Zqt;@Lz8 zl2JK&^U`!Gy(fdAzD4E)ynT(QA}`vW6a;%*O#7^k-RoC=>^FRfg&?o@Fn!4P|M7?a z4sFjl|H~8Q@Bc-faQ>$}F;481?U(-+GUE?5DB4A4v9A}!C=v~Z@k5j_Dt=dcw*(o%eOOoQpwe$#be6_kmbhWyVjzyWqBg$ z=us_Oxq>P*x_7X3rb%I+)2Vw_;hI=mxcx>;{i*m?gnq;0(}RAU_~vyd%PEQ6-vz6x z2!cO)mbBQZyooRPeRQ+qb2%6mDbJrb>UaA|5b_pScRv;XoqjpD}x;hn--+W0NJ{SBt4+r2F)!0=yt#qX?lm3Gw^)p%@cca z2l4TZei!oN2mfHC8w&j%c~?~G9h^>Ir5nbI9b)Ys>jw;ff9#zhhQ|pqz_~+q<&0}9 z57#NsX7=KNt6R2q0rvr_ThMk^rbB}L?AKO-rcls_OgzbFh!>`Z5E3MFQ zb@}+d9yJ@1ryFp&<3IPS!iP^oG@pkrAn5;H^f3&{><0gWiTB?Wz5ag;X1tu#00Z`L z=h&B8u=Va7J|xg=y*ZP@xWj)pOCDLLXpk&3x#XvW<`XXu!uOs?-QGruo18(pJQwZG z0nFbz_k1(&6Pbq(1Keg>!D(-ci&k95i^zaaST%I=XV9OvzvP$~Jp`Vnk;AT8N^p0Z zz=#ONV_%D$iF=s6Tg8shFs5CoCUVv4V7 z8)(s}J78(6Kl5-sb>>xmp-bz1Th&vRK7*O66;5%X0a>f>qZ0WOMuKvV)U!>%G4cu9 ztPtfJNe$gvDcva<=*ZeM8jw(s+D3(|aR%lyP#3=ieN#;#`bosL5sa_I+|gl-eH>o1 zjKAsFaN@|qaL|BW>PJ{lChOH=PK*MSAX`lWcWt>cp^8IKRp>Ck^O_jDV~agPTFJ%T ziAl(p;w1^yVnF{k3|svK{ZwtZg8+qPD?Xzu$;FpRKg|EXBd12xdhu8I+qXQ2|Dp^H z{!`?xx_z}4p4*>&b#9(d5|N36ZY5xd2Y(2|Fb;Wm{47pl zlB62PyE;F8FI>B?E<{YVVBsj)voIc-@Zj!IyL9or zdCz^n`@DF+Af^=Z@!sawS#jp2?_J@(ScHcume94k&PL?X+>D`Tiqp zh=^f=c|7NuqHX-wf=zy7hyGyUORfBPCL0wy6 z2i2zD{FUD$*YO?$p^!}5@cFnBPSnt*$+kbisrLB!mEE;KV?z$^P4AjVLiPaX@+#tA zzK$K&=p|LH5ROnZ>>$PU7+I6e=tL{*#IkA{x`*_BWAYJ(@g$zva1`|Q&h)cyS|F_j zg)K-^(c;rNPovoOK1-Ery;S=&%RwLMKKG6wuD3luB6yz)x4-7@w}gE^oce&5}M@Sufw zuEG)XP%Z*LLy03m%+X|xwW-zK8OTIqcW=pn@c=S#9lb;gEU@Z`;GQ&7OICIL-62u3 zADWjcHXEJ2Z_XEDXYy{tHtpbE-Y1fnDe&<5D~fJwMfie~h}n(;7rTFsSIY#i-iM$#yDE**7` z3P%YKw5{krLd;D@w*a)e)K%hk&?9s3n(AzF`Gv>mJj)8$lQKCD$@JWdjF3}20n?Kq z3HBT%=EoVF=!@&rT5H-2*Zs5+Zo8CF41?NiAlUUG`dwyD8CG7qjGDTurcckIbxa?u z=2C^5GZFXPUgLV@l*Y$1$#6n6*+do=&p7%@gcu)_B@WDgh$*Gu4IyqUtL}+K6%~qI zj>+13y<_)`Z7giPe7L0_6Zfq*My!7_#`n8_ySlDwD=J+8IEIMbEIifequBGgqLoGy zq4M^Pw|Q@uRZVTgb@;cav`s_fv+0m;AtSJSpz;NOvAx$eJ{5pz8OV6UWN0;^La=n< zd7B|?4Ikrq>VVMat5cNfTv#6ZwvfFKD4ztKp{_!RgWx^#_H=wDya{Q}25jsW(chRBd-WL$f7S|a??!T4YI9=TF4oY*-4A%I0t5 
z~W{D*ZFpKTg_ZImyrJ3_l~-_`Ar7oa~H)O*k{*C|zM z0s$Mbjb)8S@5U=Gl7{(^TBevIIw?MIPXusty}$^vM*1$P zf~6f(YoV8D40N}|=uV8eqKKwkyfTWNV1;zPG5AN>lc`GsUDn(` zlqJ>N)|`hJ&JLi!3(S+w&dnvyTMT3*{PPPqOp)6-z~qoS)K^PX*)}AhelS%~Pc{=b zr)j3>6cB{!G!Y1;R1??2r&#WC=HRr_xDra-Ffmt@y076Q9FQO_WNaGy_@p$SN2v`y z$?npGGK_j1;5}k2Hob#9@_`0MQN@ob#_x{74 zrBmJ*js0q_vQX=omq}HRKPOplDb&LePw%$LLfzo=5j+hnm1~2uflIPBC4Z%;$J&dl zu%18=qxgE&??CpAP`Osr9vSIjoGMwjwg43Ccl74q^ zV=>EM2UKg59C1h$+V1EBD;TwO72wuLh2%7GqZf8E#OS}5r#{w zMwiCA4)x0-mAx*2R5oyVjy^^)?rn*^{f2V#XyB44&~l7+GAZO$`od`J^IhgZ7E|S@07Pg8;CY(EZnqgy0r3=W~_W*2D(= zk83THK%S&*=dc7p0X0lmDr)uSeWK}qZBKm}S~Rq}Oo^}sVE_;*YA(6@9`E2xbdwGG z0^@k`)X2DT1baQ)WI&+;KQN%@2q`zzI!pJkI)pO{o^wS)oJSMYICP#eR=_<&jQpqT z{Ll++vW-6(8et;5PZJwCta=Z8nSyDAS8@ zX-XS=4`BPn*MzP{Xh@FM6vFqYu(+ZdN){5b#vicdU->~~Gw(+WO%;vQ;GWOn7Kv~8 zIu1+Cw4vawg>Bd_7B4cdW4#VL<>Hf^z}@)JpLx_S1g}bHlo>v;cX?dS0st zXW7cq_Up%{zYivXNvV1bkbS~5&W4eN5K22f8LwaB(zH+NbTww*vCZ#zVlWfu2<({6 zB$By|FR%pa(;86W{OT;J&CirvP@fqv<(sY316!~uw{(NIBXgulu*Z2|m6dx;s$3bx zGDvX!2}7&=$%)P1>vfNWcSLP!5?WC}xtn8%W0QbCI0#j$!Zy`spJOnk=olwDT4??cQo`{rQyM9&)V2p}u$sm9Kp|VxvM# z5lnLtXor>&O;sqPelDsUEFlqpWN0b$j$0diD!~6voSHV!99iTMKxP+S4EiSM6_&Fk z?CkPA8NCpos0CzaMs2hSZTJ%xu+DVQwuVC$vTH5o>ExqA7m(#we;AZtjV|L(Rv+@tIxpx& zT|qkUWs#@_b4YoXQgaV{Q2b^cJaf3v-9ZT0r(wVBx_j8fQI>J<3LX6G@ z$?bEvDqsCtt!=&0b2P)eI8WM2GCQ#swm5QRiPTKOWPz}XQPwWl4qv`1JL|_NxfWh9 zre3y&?6v}L@o}4h$?-nLlI@c(Rk|rAH-9!#teMOTn4161idNjMT*kFW?T91Pd8QJ3 zOI5N7W?`81#|F@HsIr88YRTC8K|N33tKbuNBQP;FDhd~AGmmzeSar#<1a>YD&rP{T zH3I|bs8u`f?kQi2Lqc3m{Tv|12C2g?%2LWXfOFMi2?M2~GJ^g1Yi)(HIHtb;fGja# zAn73Vv>viEjx!u==!=r-^Pe88UGiXzZca+_2=ByBmTal~HqP4Y@E!-_Hu8%lBymm_y2Dnv`e zoTiz=b?7eRI)K9YT2f$ikjO+@&UnM?9W+7hYb60Tr=UifNNDx;Im<_oyiNWIYR8KY zT)%z_vn-;m1ii^>(yBPsSU$?TdTrj=jemNoQMns?ePAuX`aOk0 z5w)6N`#N`6)+W9g2Vh%o?+RvHbu%x@F;3?l?%HmF2+k?Ffp)$iX@-d(){r1r^enPm z^|x_~c95-$C$9AY0zabe9r%NA85QpcStJk5lwgMhKDqTF$CdoHtO@In4kechVF;Dv zm=uFul*sg${%glnV8dtt63#hiO2YlPR#* 
z9hy%&cQq37@`wz{b4;{Dgk5$(%mIWjSIVBAbi_~|dg>v`nmTftZ_@os4FI|Ms5Jd8bZ3YouHbuEeuAA8aERCgB+3!oA8Eyxu)(j zLA|I9eJgR>a6gC8K<@q3K4c6WY@eWYElqJkl&bvpfx3prZ5Oa=#lP^x4Z)eQT z==#P8q*wmzT2-u0frF>PrR#%oZY!%Z_+}A!IMEp(rc@$)U}3!s5=*2PU-F2FxL{N` zNf!xsW_JO7^4Os^yaK$~^&bJVDz@Rkd-N+CbtVPv83*(rxmh*OJf@SBv&_I!*kSwv z={fCvG|{=EaRiWF{O32dtzci(xM2oCZ~Fm#9QA2)pNu89XZuEPzmum~A)G_-%10tm zf7VhlwEGConsX+VYiuNiNXFMj5o%upZK!+i%oI zI+)nedBQ3a1Swcq`-MWtU5Q?b3LeQ4Ejo)qvW=PYNDVI3$LX$m3!IZVoYJ>68~~Qz z*NkZ_XtU&Xm1(699lb_(w9T|(qX+c@t3M6m71}7yS&~NF|U*7AS47&-5Zw z)Hs*(&qdi(9)tD6viprUK(`%ib;FZkQjT_Q6a^gKL#Uu?z3w+xFo%7t!|4<+AAeTa zSb}PI3Zn+NF8)?5?kq5h3i86OurcDARQhNWScLwh)6W6Y1hDd&o$twGCLbF8OnSSD zdt7MxEH3b-w~u!Zx;S5gWTT>I-mrw9QF^208}p=O5lgFgq3D0p>mxpA)U0hFg^~A1 z$S{dBN5*uHQgr1UVln;_cqa>QFi!KNC|@f4hJhO?TQ~p9)L*@Z4d^8{Kx^KM{($=s z{pB%2;FFEWG$K=Gz9)w!f85Glo8K=a?Hfu3Ut*Wsik103ScNt^(J@6<%(R6`V0=w{ z&5$cxQ~5~wc%AMY#AEt<1qvjSHtravR*J$wf{{+=-6h?f`7=hcb+C?ZL)#miz%U0r z$??!WqxO#kfu5wThyQ|3If7GGQIsQA;bc?p5&KUECuMD}2C+SJI6U$CBbdjShvY`I zcHuIFAb@0Ryid-RA*jm?ECr{%WlH%(&1Bg=mziSM;(zqSndyC^-@IX0f(tesVGtPq zV#7UBixWlS5Co1Zc!zHQAi^s}l3y)fL{iP7coX=3(eL-s!FG4Yjuw!pg}i z>5Jd(#-&OoC#fv9id?4LIa)yyt5g0HRnqf!=Y|ZplCh*5Qf0f9Y})#C+lF@U%rZ#S z@}XryI-=d0Co(sok}8gY=`xGA>;(J51`CvrKWh%m8oswS;s-%4NRJ=j7wKG^{fHGz zrGawV?$@zNMmi8EnqRUl<(;U=7UqTR=M>xn?%@%Iq?OJ)VanJvSK^9G$m3rRNGcI3 zXTv~eyu+|YRWHaR%EK4B7(*H@2)0eV-3ytsm%9nIjefG7*qh{G7Au`owEYdx@IyKv zv$eI-6^NH$7az0qV~|skDxcP73jx1@Jx(#2u`h8@@qEr}Vwr_jN`6PB(hn=jp@PhA zlz$Si;*d%|M&EMNZiACFXpxdvctE1EY&y*=)Qm zdwDJF=V&Rq!A|H_Q6$t`?^Aw=D)mA|B?rBWpebBX|E8r6rB6P^Dc#^bh zJaW`#Is4=>#YtRNDCh3TmDIx&v`dw+<-B?Mqh#iC;Dd#Y!p_Air1_86WKQCG^`zLu zq+tcr*PI({@CMErPTGPgqV*^2JxOb>V6It@Of)R};rCdGXIgY|Vy~4k@ zvyyzOhP14GXBV)XOWZmPbx?+5(q_##qfrhSSr6<1#tT_Qij!t~=rfA$bi}6u@LZ|O z1&ao-gbM;JaM;jf2sg$#i#>m5i6OwAdTMAO=(ki@8lw2;8$P~}$(8PmBkQWee#vEU znbHgFj|8TR3elxhlDVGUGQ9oP`33pk%i~%eISN_`pMdS zgS2bjNi1bmYA=WwD%PSTkUh1D3TN`s0-Nv=?veG(0ru6~N@eVjHu7_Sh;U@(vvW(o 
zle1svwe<3U^)S!ZUQF)!f;FxosW;DceSC1y%TEw1)vEJw(bg1Jqngk_5{b7A0q3&Rj=)ZO94~Wq*X4Y*$z@ z-W;4K()FQ$kjuCX1i_cMMM1{tkgXTifEuTpJT`_+{Cu4o>|Eou)L>Xl)#dF1K^X~x z!s7B#;4HKw@0>d__gG~}`U%^;uxA-QAidxWvCskhESmu@a7Ao_7j_DJhE?~MiFrn! zPp~sUh+C=a0o&PaVUl%J3mPb1EOK5bTlpvSC<8(;(TH)AJjdQhBc#olzQ2)Y5t-_p zjGn@}LRm~ozqn!_jumkssiKmZVhzmgf=den-VAQtCCpP^^Y<$Qjrc+aD^j+Vqw?8X zJnz7{6lGZ!(82u_d(N+P{=t($*hI%WWn#rEi2W{*gOXsFhnvQ2gfeVO6>iKttax$U zP|RYiO2I`jbD2ZnwV)9980_VK3)D6wqEcuhFxh;j*?7X(M=kXR8-};SC?`D(8}^IXS(bSCN-VA^}Jjwxl>)F0v&k=B6N8Tck+q zAark^F4*T&-37Q#p^jd-B^%=y`!7^^2#jW7lm?IE7gZOyH1fzS74KmRdvt76XNG3r z`u1V7uR+Dt=IRA=S+=*qKm9C1USKhb?i4L7KU}VJ&f-d9Owb`9K9OC#E(oy1as{EB z+B8~N+Azqu%a$=d-rB43_lG^Q7K_>P_ZMts^7m1Sr^N`F6eNC#MYm}(9aEC>YhLZGpt}x|rtR&2gQ9}?cN`z; zA9TeByTR-VDEq(@w=n|cBHaMxB;#pI)nx<*K18K6n@zp+>rUlA(?taU@<`Yxtg00d!@oU$+F@H2+vXw{6(zgLpFE{tLjIs0rLtd?ND2SMV;-Or+g59$++Q^!aRyDKQfyR zAyb13`|4?9VmO{&x53h|CDd5#AaqtVc)j|I1b(;sV`22{W66P=Y7vUZ+$s|1&9}9A1U5 zpvK%M9Jy|xsc(A1N||GTzF`zQMv%$19>^9+th%O%s(LN1RA$F6oe-Jk#ywLV1J(bkiGY-_HR3X z<$Ght|GWGxH6gER9d{@kF-&Z3&@q*nvus?jYG4c)xZ-OMUC2?J>#c$Z)JKc?CM&qe%*j0`2EvcU(*eAQfN+iTwFcr z`CBvlZ9td)-K^5~G48X|-D@B;^KzZ8nSayZZyW97fS~L8vdj%KU)JsHF(F@*{%><% zlT7VQabF9^CS;$qR-*^)KZl_^>mP|T4%<8D(o-8R6Z2Tx)tfv?t@XtIu~r#6%@hoP$K?wnZ1GrN7Bk7U(sG-zQlewoWFeBvCw~T zAAF@yJk`x|Pl7mwcqc)jqc>!pEe*52;=k=?iJZ7Bv#J(M+(xy6gJ-11EDEaxD#`Hf8IhjzE`rvn;1Th&zK!0sc@}nm?3tTCpQ{vbhyP< z$;}4;rjK@zEXWxwXf6pkG~GAHI6|LSijbvolTf(R@ID_$SUaB++qR;jdSXfcLq<0N z`#?ODw=_3Sp<#YxoSpNOFK-ze#XfW}2OEn%Pi9WIuVfxT6+23G*CIq?K5ZctFHH4Q znxO{nIA-b6~xR&RuGo zJNxZxHQ^abt?T`z@rLhfvJ}~vHr=U;>Tt$_@O$+4K+ad9+uGL>Ox7U;ApO7z74e*J?u_nO(VI0zG*m= zM zS$$ZdWBopbuJRxHvQLUUpqm=Nc#&$Q6eDp*L8dYS*onyP|K;v2S0gJQ)dx>sD-HIWxBlc;#`&X?6`KkXf_kMXKXFcMWU-&cc;Qa*}TRNO~HChz8 zHughxU659H$Rp_S?2gxqCdYk`#{YY;&nG&Wt7<<#eCe8IfT%qrjd-0C&`J7y0{JuP zFMB}zwoiEehRtK2bPp$G@(FUI>apugP zeNAdGwST#vpbs~uEO$yv*Zz6M9?H{gJU|V;^bBDd_gf4>yJd={cA1@Zh%fA*qoZyh`nxOA?jV$^N~ z_qaI*R80yDi^<)m@X!h5s;_hFvBr+EJm{Jwko^YCQY~fa`7fa(b--Z7q+ 
zIMy9h0i%gsEB;opMM_Boe{}qJQ*LTJUG`pU!St&WS>As*F!)T`CU+>>F z)bwa{5(4OZ-e|rkXqk)LbY$Ji&EPb426l+kFHJbZw=OZtu-c*bBt8F3^2OBdg~#rC z=_uv7IMtW&7w$5o(4wUx^|y0O5M&eG_uCSgVVCy*t7*HAU>PJ0_lGK$9Aj1~J+AtmpJ9JauRBXMb)pqp6oYaVM?^ch@^Bl=m0KxL;7 zE}N)3i!qq?uyC;DPRQgk7a{D7P$@rom%Jt``~ao4sMAeQxeJIVGExIHiTiuft4j+; z+Q82{?1mT{Ysh4EVSeL<&hrvG@(oS@POt$KMdG=U)y-)_NAK->XM6Z zk$eYG6X@~f-m)g~IcCRIo}X)z$~h^i97nI-w9oi|+0?tcV2e20IzQuExr&(~qNO&G`FU{()wF?A-bR z7PRcaj{I=0DVmo`W-EAt?SwaiedTD}?f#4N5rP(xkw-8yqGk2IY(~54BH+CCxYh)7KpY^Fe{`ybcigDa~9GIL-%#^KKeU-L)9`XdF&&l(d-q-RSU2meer7^0bS_e z-9ztU(FDXM3GP+>VDMpY^>i&x(M;<2qtq+MBt7UKl;JB(}sAJUzN6F zaCP3yD%;#(0N!rB?+Vo3j$NaXcyvfS^}a)2u`ajiju3DDR8Q#NNM-2`_c3uDNZ^XF zR#$yYZq3_LT#EG_l0ygOSt-d`!_0E zfH1)Dl9@QjcPJvYsbgqiGCfORo?)=(wyqL46QtWY*?63%V@l}%;|s?p0E?C zl*t$zV|ROb8Qs7KB9Hll@!5hU_}Kl;>cQW^oG+Li)8UVb)wVkI!4%&5JF>s$;#JD! zcb*)e;S3fLR(^q;dx)rAP8s%sobebRB4)NP&(Tx;;{~g*YPX`Pxt(d6^Zino>qjDu=qEH#3%mj@s<&(oon9-wA5a@yqn*{i?Dn~xBK@? zr76_kR@Alblzbz}aqkIhJ>=6K155yvf{Tr0fr}FUw`7WB#mSmvEugX>* zZ~wf?R>-y$FHkha#6CMr*b{+i3C6gK^s1Hig0Em(eDOWZgHYjy^xRvdJGSf@P^94}UiRl=JP2^Mp0r91U6!9FvrcL&WmY@e#_b@)t7Rln0Ybac$P|@!Tq6 zO7Cg+--izM?tpKNAX8uh3|==1?{lt>7AlEYTnff6peFqdm%-@r@PcJe4W2&fq4B)c+j-?Uh(Ei2Wh@iEhbj^Ta&Gh=(cQ`nv zqjqi3a~A*6pmFwM5dFkHr#P(#?@*k_=ltqu zUomo6h6t#pc|{2r9U+ifhh8G!dBmr66%lZFT9c zJzextmHUY2M)XK3o7A(8D@C2^WA3HyQ4~}ix!zCPo=yp-PuIP z{eUo2RAy9=GDzMh@uwkoj*!%grB6+w#smqZ@2cly)NS9I1yI|>XNYyNwDcY_lH(ep zSD29rL|`B!{Og`kWt!A%qcl{7!c6bZtQ4)9RMt8aFP_q(t}!Iwn$w8P5BAX=l%rT7 zq^V4Z#1=sukBVb@GK&n|4@u0DMp|wHsL`5jlTPI&#!`oa#RV$MHtvejnl&A((c1z9p+&@f?(cDR;|%TQvOjFZjd^`7E;3N|r!%9W&Wfov;T zza@2X-E@tk`$CX>Q-AT|cP9_+COVD!xjM#>Avy{cW?-~E`btM3gGfF<&EAdbL*^2fP5hlm_{Rf^Tg zxQSF=&1%-p|3leZM%S?fX@b@hS!`L%WQ&=ZnOVAGx?*OQY%w!4GhHz=Gcz+Y)9Ur^ z^ls12=`(ZAmsyn=^{2Ax%c_isjHnZKhpk#Eu9cEzO=Z-fCsV)K`9)gHC`=*pgI$X8 zE2@fRn1A#{Oq-(&t(FhT>fZ?>y8Y6p z{QfZNsbWb(5gK%oa~ud=oL?eklAUuE%mOuE``pQKM^QsyH=;XC9m3bJF)Yvw??#>= zTtI^_dTR!>*3q$vJe7Oy6@MrU#v~Bi)BEmotJ{q2ZWLiRa$S3GlH)~ccFgVUW_F%{ zNJrqJE@O?+Z)}8p5i%7p#v*B 
zBUI7ncDjQ$F{PNjBaP7mA1G;R4+gVn4YX}WKZ}y%?@5fxT~(kr*fWmb}y@MweYx1#V6R@`OI@PLjL13 zlcz;>cM&7jxie}on7hKKFKqe~?~IC1OdF|86PE!}Ce(e8SVzW%wk4Rmo$?;IPHXyo zW<=*zeJP4s%yWL=j76h&7O-M5*4FYAaC^6;T;}3N_W@ZrJ66Q)ds(g0t{!9P)^}}1 zy@hLu{~20Nl=znYQWKm$`V(P?DXaS40>~Y&zz5g5J=`hu!Y!Rv_@nj##$#l~k+SSk zP~R&f<C-i`Wi{@pIK9n8?G}U?rEe;0^|>HI8MsW7Yo|A?-E(LGqui8RhZaL zUx}Tbp!R(yuVx6cy>Kl0dbG^PRjPg;df^OipAd!mOmz678IEoA7iBw_M@?wg;2>dYSY^l?JvH%UwNz6>6z{@cinWi{GQr>gcJ%!CxOhZ!dEaHmwMqv};$y z<%!%G?||;S!COE|AzN5%i*OpBAseep2*8n<*@5sXCcfy0XeHUUm>Q(Ja4O{fv9oOOIy2yuQgN3-(#lF;NI8K_g@affM}9mI`;&BMq(mE(a_$OpuOtN7lMsN`EK zSL$8bSkmX-s_IzajAn4C8+1p(==(?{N1J+`aPz@WY@ZN*JL?f+1z8aGXK?QnH*S{3 z1y3L3_zo?1T4H>TO0ntzf|B2jx+L9x&;ANXEm9Eho9pFz4DAhv7BoKT;u#41<2sm_ z&(~dVP;ZuSacTFr{ZC+HjCqP5p;j-$N_$sljgYw8_5ECWaRHy8voe9|9#VGD`L-EY z<4v{FkwgruNi-Ulwn0TLx@sNQBzF5wXrt-ruN_UyiRa8WZ44+p zrq@&7XdWC^meE?0(gCSv-@VA$`&1-8Baw)*1QL=LVlh$5o?tvCUf?`?R56M{yIjZw zDoGuTr^!W4_}dKe$Z&wN!YQ#%+(?3O{=M%;tN9K4{3jf zTruSOr+S6oydzZ?qgYtiE)=+}6ib*I%PYJo)APKhVftZyKuLc_^mjxNz%4t!?nDlT zm3v$t3^rMSuSr{tM__cyrIb!?!XOo!p4qU>9R9Shh z71&xmL$2aIQ^1w*janc92PZ&2B(d`jHiyx=j?VggP~T zj42LgY$pfE??~b}{1LeTz zk?~ONK~SNiGQLDIOCs(|?^t)Y93`J3Ut!?}4wIU2&*XZXG*p}*X~@S9GSr>{@p6x} z5Mclvdzuc&`p~G@Ry(#ff|O6g0-tS`1M8g35CcSiviW4OTlLREPrpee*5OGE)WsI% z!hhLoDoQy*5)vc#(>g*%%i%Py-=eVkBN)>YZCa1$5^*&*)71O=xB0kil z_eW;R@%xAhS4}lZb-QLNmPQhNP}1TGR82-d`978Q-U07ECaSB`l$toD&oIW=f}_al zC;Ka@_0Z_R+E*Zq0fKdXcAg1nsNmJj^qsfEO+=;?a#vvPAMDlzCCqNyISN!^#HFJ_ ze2wapuP}%YGPI{)nM$6Kl?e{`esw=lnq-nves*$`8A9mz3`11Hc)e|;V61Lu3O_sf zJ4+&mF=XyLrAj{NOOx}B5Syr;Z4EBB*+!r5!Z}KO5{{6F$)5qw199S$z$ZF!d61Q} z`U%?t3XbDSJ}}yl>FSx7%0z)a_aTD2i}@?neNfo| z2S8l?gpWMal8u01v0}47QQt4Xk9KFzs)AV63nB9^uq#Y!3iDfXpIGzbsO`TC(M=#@ zZW?iAclx)#VcU}&($q!mphn^5+p3b+ZE^|gBejP8<5TU*Ha(tXLI%;njlca1Eo2J! z*E*UxHkX3irL_MH9*L2ic<6kzbK8H& zA3^lE@biS6H#qKPI{E1BR3*NV(f?8wJ$owaWZvb&WcXV@pA%cE+R9GXZ?JA=1#H?T;`Cj_27L@`cX5?M^H1! 
zMe#5*i!*sUqdk=%2a;+s@%Dl9@TaW|N$+LyJ25Pmo7-PZ>2R#dGMKd_Yg0<@LAc_I zWvVs&>t!a&H_Tnw+h#=e;tz%`?=Yb|zJIvnjfv5hhAREpE4yJ+7rDz0SND)}&iN7m z7I0l%o%Y_3;|-l8MShdrpk0LN_!q6Ne?>dj}eK1FGE(2{4#5X&;n6l^v5OXWeK>U@<~o+LAqh^vUJ_oy{9N#Qrk z{CgeajI;I7OUKcd4RbwKHmm~JTcBr!2)8`B8SDAfn>lx_y6*{$P92(q0dZBw!f zJpawR^2adV%^f0PfC+J2q#+9uQ3)I70^Pm>rTm@Vxf;Cu%HK9~UaN57iZwD+@l*jn z-Jf}=t>_8zfSMPP;{Bi7d-{?z8QJTd){UCxKD zd6#*X2CMXZ&x=yvXR1?AstmOh*h4%E=G-$ML3DVa#yIx|IES`fT;hdAo>+@N&l-rZ zy7V;!4Uj)&*)9O8ID4aGdBYXA=@^K$lF#(Stn(+UOT(WALNeQ)VIsJbF0+*wr{d*P zEO$Aj6<=$fKj0sp^8Zn8On`O{^OrG5~0 z4~D$UB=hrkm9Ail8=V7(y)3i`s*jz(Hk`n+xsiHm`gh+e$V*WPU?aPT(*kQ(S-FR> z5F3?3Xi7m~v-mhXf#@-!U8IwP4IfSpF{_LRJCg1=reFhjxmh}Z6Dup5n>Vk{BOV!L zMBt*h>JR9M8D3IS_r=VyM%%M7+d3o}V>Pf}J2cqt>Nj?lI1BgAr>04AKkb516F$q( zfckdLxhWT)-ze;;f+Rx32oymQNW;t$RIr~!`KE!eMYQMU&X`senmdCK|E>$ zp;na2va5IuNg+y&r3}G7`2`%#-izkphugHXYW4Ok19Af-k4%K?-ZT0}vxi*k_I^(ZT;V6ogjY^4I+YOx=8yiudxJUI;1&|p4#nGFG$AfvC$b$` zi|M{14nZw7vgUW1cx=lMVeFMJsT2Iz#t3B9S!`PRuS?_!i4>KvsJDo{sP2G1!TM4w z%xx*^JGINX+Tt;o`9tU1V3HFwbOzz`?PZe?fH7eUv|Xy&$}KT(G0>-80D;RHgcC1& z0Z2c%gIgHx#7x5v9L!3n?INx$WK$qpoCbIKi!=+$GFezZtD-G9BpF5;D&n_D4>P3Pb&k>qZxL7yK-e-r45PEJ#B^i@>b-e3VexiiQROlJ?lxbPTU9v`-C5NLoz>k1!T!5 ztMSPVQ|-vO$&Yl>z*Al*;;QNdKcT=P3k@SzZhrkv2M6udZCwcL@rRuRdl=ExAaqaj z*bI$WNSn}P6~H%yd6~fiSjGpQM4W>kx6*CHP))`^{sJGS6KoicG1Cla7N~+TIWz+4 z$B?9U>o`%qpCdnH8-$}p&;w=WY5Hj~AE(he@6;{JL)ob>3Q}wMhi9DsK$`>v8yYxV zgF!sN_Ie_9n#8mB-1Gy4BM~@A;b9Z6;N45?(T+W#zgv$;Ayrdx8<;HFspgmA7aF;u z7kKoQH63c2hA*Kn90aY)xO|X#^t9J`RCt(X7510S(8lAV8yEDJ$L&YhsdQ8SbAK3+ zu0woG+zTko>iuh*iq283cDFYM^MW6HU^c|rT9PF-7(nsIyw{ryTj;6 z_w#_xMkzz=^Yrt7itYn$uJcbf@wabI%KsZ-MGXJDup)D5(6)HX37MT29T_A7KfVe0 z>XC}m6ShNhSBNr|)#+TWEUYl1Evx{{j8*Z^#YrZ^jjBi-NRu5VI1bbQqq>d0vl`=W z9#PS;O0X#7kG@>Lwa)q=xbT_$%(^hRUC+xhGVy$kljD1OCGdWj7UO$6s@3)RSdZd+ z`bYlq9RHB`=doL?^O;=X&70wJWcH+h?{jN*vh(SQt@G_o zEX(VyUo1<-aOn0~Y{I50{!%sGprT2pMlm;`jzU!`R3zjrKLHiv^agD*J~;M-q#!q+ 
z8Glq*0>GUZ7JK6=9ULpnO)4V6h)*RfA;PU18av`D9TKaYwsWHhqR8=8HJXlc9>Qm1L_3K6lUkD?3N3Ux~G)v6gnwY+*%~n zo2K55Hw3r=17z%}!mxpzvkhhwY%9!@G*&OWyQPYy{hj7!HRKUV@*2dFyT9WTLuXE& z85OYUa_Y3=2c*blf@fS|3MBrcL-5~@3e4l9DFT?Yn{hXDWLX%{ z7_CV|#FeA;SvX{Vag6U@|1?(+g}P5zi7nRRNRo(2mB~?*IzSuRb$T+RGl_VXT{U*v zq_U1rpthVPTyE1}$ZDUhmMwMCIk!<`9-*{YV18T|<~9x*&NW`PN@bpqV5KX*t%!B6 z-=HeXnr%q+ChyqO=oq=xkVg%Z(;@FOVC@&= za+Bt@NyiTw#V3W`oV5A>E0PPnNe;h(Rltu^(4iPHWE@e-iTM$7@|G{&R~!QS?jK=C+Mh#vn9KqpjGeMts2Nmy|Fg zDG7iwmH}&+F(!OiJTc5jT9lFK-*&#ZYL?o6oJsyGDHLWbY!VDJp$Zid3D-~{rZkuf zRa8LEQ;iP^#jMZOQtm58gZa1p>p$c~l%Pd*YmF zLz5{WPNq_-#(G8FnM}F@-J;Z%!g(}ZDaA~hUGACgpe-r3TxuL>;-c;tVxiZJn8{Xl}8cGe~CkWmuKwCXNPY&TREpm9yEb zC?KTgP2=ljs9j~XN>rLAWqT1#TMKs?!Cv?kn`m=nd!ePueNFQ%Y?!ziZs`E z=uR`s9R&8vq%P7}Y-Br|g$;0ACM>{g&`iRwuv#UnEVSuV2Ri)mPdwv&7#uIb>XekpYKJ(e=j2xBFR6Wq-yJt_}=;3aYBH6xOtu~d4Cnl(MLOf zo7`vRu`HOB3`@A6jHurx&_0?hTX~NrBbAfMOyOp8)4!S7lZsO#t&-VDe)Cw-bgWJ` zXP7pK?KcXeKnJIW(trm&@%YgMs>9b~8Z)dJ`1fOm)u8WEi)!#46Y?m&vg=MQ(UH_C)rQqJ*Ba5J z)C|=)0Z2$-NkK|oORh@}OV&%~NJdDyNdg;Qc5yy@ygA8L&Oey)s6{oy8}HmnwHf|rZvL`wf*p6Dd@G- z#+shpxM9ucx6}uk)YVI7HKPWN{eod4s^iE4UfzQSVZYI(Xc7U`jY}qW^E;{iZeeif ze22OHG-2TA&@_ku+y-_7ySbg@e!DPT+n$~Leu}VJbQ9_cjr7_kE4%fbKouq7D=;=1 zT7XtVlfm?GVXX{CS|zKOWvM0aUSz*^meovYFHKyJGXbXwGr>FjJ_l8jB7 zHS4;S`_f(gvB~XD1#0)^^~{b`m>PN&4ZbFKjjQ?f*iJLGb($`y+c!m?)SG0q<+Pn%{nv>aFouf?)q z>aHXrerhdshP;zcHf0(%292Ypm9WlQ7Osv}rVXR5{>Zr75`;sL$9EGN7(<_^6w6l^bUAXm6; z@*e%NerT_9kQpKgo+t;4AR_1!K>_oeen7vxmp$kj5e1i&liF7Os9L;IM8BrjFo+CM z4lk2q&7tY?s%tx^mn7)3#~}yB77+uNmh(-GmP5zL#7 z%yvSrMGza}XWRoVo)%||qy6#OOn*U{6kE~-Esxd*2G7?NC2f~-!Z;3Wk)li5p!L!! 
zUBe~a~AJEfd64gxDjxs#r0;k3|OsOsa$1=!+zxIRH-e@7+XZt`qf`J^d~NO)cgNeKi|O9HA~mHwdKG7nq%_(_ zE$h}byXI5-$^DK|@T4HcA=s$=XdZWuyX)c`*Ylf!f_5?Js7U--G)_0i2h4-%#k?#r zpC~YVa2_Z(_=gQqjxl-6otkdLyg|~a@h^HexLe*XU3|SUF+H+T+W4(Jg%XI|#;#*` z$@>hehLMAU0nPZg+y@@i(FFSx_+dPWuGIJHq|1ux7d1VGO@rT~V(|00MLoi=F;5vL z$~f$c^_qHIqhRn+cu3u(ACh+%<_!x5DWm4?!C)=87u?Hl)h}x%zg$r9quTMI_Y|^X zU`f10pTi%pc4s$={N%*4WO*~ZTAo-$5c5!5UL0?aFJ^m-iY5`F((oI(tv%Laxbkfm zcQXgQqM-4Sc<|l0A6$2?=XO)p_yP{`>9}>=J&&&?Px^Nw#OSg%c)i{l6%Y;gS*qo9 zv%GoVy`Y}p@32mpCQPCy^|DeF>3D^_0-xa@u`Zdq53>h}qI&Vg@@Y{c-EnW(FYJ1D z^9L!SX7Nq9C*0HHN)^~I?Yeh!2Y*M6;;V62x@+Aw(Uy%>eciHAWDk6^aTb8L)=TxZ z{0eZ)G;I<)X_VCrtK?buB7K{3!6Y{EG70ef%jM<#eD<)sTf1nosYwwe0sHEOI0XC1vFEUK7(A(vWym||B|G#} zp8eQi9=qnDpd&wq4P_c3`?MZKVSC;x+tV);XNx6EM7gIH4S2k8r1cXIpd)`4++E{UZt; z4ueTR&zh~t|LBVk#slsS$*pvRH$|3BnQg=0>H7iZ@y`QPIjlO~94ZI*7mf6+x>|hi z!EpXzg~I*5gkH^~C)UN`2KZ4;q36(*=vx8y`p*OO8SD}sJ<(QMPoJ*@SR8mEgfv1j zIz5k`XxEr8pnH`(2oAOu`~YGMA(>2yO;4b!$5#X_3_KA+E8ZHTnp)4TE7Uguj1Gte z-3IH5cSX7tEBG3XNQ5WE8Q_R`{BwpSUsL=s8lNZ$%{}lA_84!5Y)_->-WLaq4cr=X z4beOjx1$>^fD6n8`~q?f$@Rw-{gy)4t}mG)9GDtfCL}kKE72&Sk>t=^v>DN^5UIa3 zd~+T>MLuP|jd*AD2NAd+R3JPo7B1tjatiyVGJ0ks^`Wb1WFitFG14mwwzniU4Rmjl6YS@n*v;Xa;^B^ zc(xo{p=(ynYthR@4uS`PV{y#nV@QA>AbKN(p`Ga8L{dVD0n|v#SoY!@6hUW*Uuh<3 zjl6~eqa%pY1X+X55ID-K>5MwZ;_ZgOLa;!@Mqf*f=z0C|nBGTrVjd7WtpnB)|3PY|nkVZ2$-rqK-M18M zhfD?p3d4tBVlz-1xb_7H#~|;LkP1tOf6ZL!vBu~O90sy|Yr$^FO2mbt(qYN?3?w^s zJ!R30L^gseVvfOIJAo;39fXsDE0Il5TSOeAj*zFBa@B+{gL#ovkiAG=MAB7`2p9+r z^!l=cyOEhlj77#G)qm8`mvY!Y@h+mGi%>_Z|EwW0FzhP~9!J(BRuw6aY@)sYS;K0e z*|!i}fILc^C{`YSU)9`hI_vhe-p1V53m+skjy7-EyuJ^$SPn6SgTyltoL!6|pK9MMVtD zN0JmfZxH#nBicfhj7s&j|*lr3!)?w zLS=OmyeZ#Rpo-y@vGN(!fx#p431*Z~vnm56;c6+_j0(Wm5u*eON+lKJVwOG~-eU8l zfMM!{Wl9I-gVHgjlqwKEFlU4~p`Q|6N;$EVS_x1I5(E~EI48(Zrl~X(FIlQA7lMR< z86)lq2$VP~&ZXswfQp4+RVtS`{fH5b1Z~Pi<;t=(`Q{RPwf)QyFSH~v$|x25Vs7Q; ze0#ut=!i=7?i#CT3&v zv6lTNZRv>N)lN8g;{v0mX`gV8S#F>e;388WF>?0oI6f>{sRo3vLy+ z!q{$yV@pc90=JSjMYoC@(SmkS=!i({`s4g*Kwg-rT7)(BI*YsM-P}>Zv}PFv@lbx0 
zXu^PX1P`_c%ZvHl>`@g1zhU-3r&08PUIY=g5KEvL{32!!gQ8*lfN2ErfH?RfW)Xvq z;okw_2np;smO@kN#SKy9I)i`#$p{tfGL{9?^7*D5JEdLF09wR?)_hZ;o%U|#0CiOzr>qYBT>jmqDWD^NN_CoYRl0c9^=0W5^!a;^XIP|ZQ{}mlW8N;CdF@`$!a}0e9 zx$Q?ADmOCoLCIE1Ie0gC^Z8$~zk`1*{+|BD`AfQny7n_2EgdBtL+ghYs@6{}bS-4& z{ZhRWy%fD>y#T!ey#&2_y$HREVh-YBDW(OO1vu94tgx&wtZ>$F7vC>nFJMv~bZz-< zTKSMr4J3~3c z@T744?MmZHVk+U0fyjyH#ChzvaGX1#gobW0AafJwvT?}TuM1m~<1>8bQ|%2&dy!`1M4tRg0zqA1yfHpr>tqFG%N-wFPV zOL}pW>WLp2c${2L&c~M1%cZq4Can`L%SE+nCOH$t8U376j)^DKvw+H4!72{op?$ff zidr#~!U^h(WljgjgOjn@l&V_(!?f}_KtZjBNyLOkh7;$h6YG)X+;aXcVwzLtY1@=r z$*pKnd$&x6CTFA5#Sx1L#O6-r1W(2*Xb$1w6nBcF*g^cTdICAag`?C#`mkvNCL@ls z$Wit*JqASoq>zOnnn24a z<6LyCJgwUeq;~cbfbv5K#Dr(TG^StE^Y0e-BVZWOtLw(}tARp+C&iLxO*W;U&}-|C zIyRbG&JGh$^RtFpM{viyV?F{-t^WH^2jz|o<$+U+*;xWUeqc~a@P*jYagC2qe89kN zX+Ho|Ey4lj7)!EA^^9IZx4U1baYnbNA0!kU0y-8otGYq;oL)+|MqXj?7GL#%UVXQ{ zADSWr6f`^{HZCK(rETAq2`&q}ado#|UN@=VIFu%Q1GXj8lJyna8U2!3^{8HBH@;sx z)GK@>PC7liSz83LYnBpJ5ke)l7DJPs?etc7H^9#U>JZ^0Q3@&vc%BB&gk{axW0XHk z9yd*DDDB;^9tR-Bli^HpWIZ;Rna@v?aEZew^^ueNQl_-akfup9q?j{~@1=G-`QfmL zL%4bN$oOeOH6mPKuCX+k*luq9=@#-6gX%n}=|=G@f|`WS#A%~qt3?RHD8oL`*Qr}H@2Zc?rbl-+rzUgrM99J>(6y;sH16uz zZS1@Ea)*(cjO>^9LWflVucV7I*2!zk%|QEA%Ni*s-nToyf&Tf+-`@8gc2%1`Xz`Bk z=V}6GLAqXO@jOpkW!Y~llO3=>i@fhtqSxNlbRp3!K6c8o?JpiXU=?>aoOZJvT~|+P zEDc6aYCNUmP8b;4yzi57&TldeoaHDd+P z47d;M%*x7AE3$Sr&U1(4xW{1~t5TbyuL5|tpS1-hOtO@DV}GHYkVh#x)9edVgm`b; zQ=8s+%+iPdFC-6{{}Zx@%>N0AL+1a4%pvptjdxBmy0JD0E_8-n z#4J6RSylnNh&)0me!wi@|ATuL+0&L3w|b`BbkY>8<@?)Hqxq4E5Jqzo6QG#F`WPqH zy#?AFY=Q7!!ZKA<;|7lFvbVLs=4Qb2_^I)t#SA+gTdnu|*;&iI{VnKZsPQb&Db*Ob zFiBCI5t9^`@mz$*-2S3@XU;cWHZEMn`lJ0tZMMuxpbEW1hi3J2eRRH0R7GT)!@*=@ zEN^zA?eo*^a-4p4hB&;QkWg^Hk=*pz1`HVuMOwXDPk{I@_V4Kc1|ok<3~S~V!fUpH zVC?neY@((gC>ZA9a4hCpm>3st7d(EcrdHu0v)-G{mxmM0hv^;@R=PUYfQY(sQcD12 zOZgZ@*`Fjb)GGNljT%s1o4|GaPDN;f$4D#cBsp)kx?7p2`ZaRD7#h`KLKGEm&Qt!L zC2wb4hj;S9b^pm>z`({rut*Kv+!jl>*x#t8JM)5YkK zxDJuink}w+GxT@F5u=KpcgnS@|5BCeI$Xd~mI=Q+TyvC}`g7Kt4=YA2CSq4Hu;_@I 
zx53Ue`7f|9TFm2zp;40pq-&Cm8G3hqgpp83*2sfZ9GARgXk+$tY{Ny~)%X|2nY#arfcgUXO&t(07 zIQRSFMq9dWXtGuGrqSi8ss-VGWpzfl|IJLr0N)%g6XVG&)VdQ86G zf@!)cx~#r2YFmO^LtlLRfmMUI1a<}y{ciA;Lud(((8vIN3Hl6L9=R{0OJIxkTMeoU z_~W<7e~N#GeX6*59^$@--84OXMPNmm7bG=PoTzxlv853 zw4)}~>iTat@Aa95llR)yN9$FlTDQ0ipF3QMu(j=trJI_I(uu2?ZEG7(*YS^Kyqm<5 z@$S(*W382a=<|BAmvc$cIE-~sy4_w;n`4T3@<(@FggMe?d}+k=%y=old2F#PiGx5< zluDtumk|g8#J79uo`$tC-6)zH%O9A!3Cnex#tJeiaZSk1kc}8&s-Itv2hH>V?9dMk8 zYCQQK{FoyIHydI|rtXqp73jIU`a39CRvt`6C_DU3ipgiTx0* zLF+j{qaD$?K>;rS+@>?N7K_ByBR zZf0aGQbuSpT_lGOQ`3&efzhlgMqP*Ju|?m=@vrP!pmuYdN65M}N$q}s=!F5pcf(sB z*9c4P_9=N18}iZA0MuyJ=W0Pw?G;x4JnV^q5pYtCh8sa}wIEoWNl#g|1&I4w4w>HOSF$GWG^6q-yxPB+{P)m}-mQlE zG)<&Vv;aJwCB;FwRBEcBJ26Gie?5nCywp%HKeXIEcp{-cl=G#?t7;2KcIL%KeH;)t z039~n&rJI}eRMkmqi7Ry;Xg0-Je)IqZ1&C?7u~gZoGxBB|0l;7hB|#QMeuLmK#^TkJqkllV^Ovq@A{8qZ z^!!Q0`TS-g+wNfQ;a{%DwM#(N)~)yxxM9+9d*HQs+l(=B{rf~P3K1r+S<8C+>{!z7 zL1@$YhZoE1MgV3#0{F&#pptpo6hmLAbuvxL3LD-Y;*vb)%OZ_K@E9_JErzRcRfxNb zvu7H^9(KD~5p5Y`Qv|Z#z>k^jw42Gpr;;*V2vU0m?PWpma4qNIa}>J|pGgAG*9G&f z2f8edLxS>;-bH(n4~k$tS~XISQ8&TSq?~4Npv8Nd(5me}`!M!2J5^fOImg*f5l{Hn zM71HU>Hc0LpYeazV$t$)JbzT}Fjp&Aj3VqySC!)1<(ALzGkmEjNPK*0du^GzBpR4~ zps0UR3T@&h_Nexn&Mgot@Y$wBJVGYUk~F*43QKII6=M$Lg?6+~8TCzq7xeVU@`Uln zfJJx5{>X(lxTG(hN2>=|6`OnIwY2q&+T`yup)Qj+DzNX!+TN|YTnqH2s@n}2KnsmM z9GInl*C<#zF>+uR`{|&AM3=tCmb8fo(8l3FKmSP}L~3Y|BdNLBVr;2Be?L(x=i`|o zmD#!It>|Kc;o6&WPIm}oDR~;U$)L5>T;Fztja4R#4IuMYO#TB>}yL< z3SVq?)EoL3U+gu|Ct}TcLMEsyGFW+1{FBAU7Q4R}d7^U@Y~=x+DNfZn;(4SP1RoBz zi9BhP13TCIdMco&N#KppThqd{{%p=UPaD~iW-1*og|T53F|u*c|1>b3Z;d{iKeA$| zv&N9=x>)J{P<1>J>b@&MaN6+~~Wllc#+W$6J)8D1XAf~)2fnDHN|7L@N&@GW`du!{fHL@!H z`}6DPf6b1SR))F^{9)X;e0v#N68EB349shS&dn@!TpiXX(q$-%J z-6ft4JBo6+`0$Umtv5eFBk_y0c_BO!ZP#P$nRW}yN-7%D2~+q-U?sVVD)+wZDm;Yl zr+i(_iUk`Ul(=wZKrUz8ItfgQ8174^O*ik&N6%xq!*=eXp?x0GpCQ6Gd2j(^Wn8Gp zmfy!9As#Kwx4$9Q3|fgMob6iCMw4Y%!|*0?U$*(US38h-%6qTm@R}JSfe_D@5D$E; z;7Fsvo7)2t;&u*ga@Rj~v~>dHTMr*Z*I0q9RGrYyL{qaBk{BuRVhdwm>Q$gs# 
zjX9MW5Ao~6KYR@c3xBjTq(!jQrAprH(J9*38j^JK^(SF3vS0j^h78)zE-`Y^2t<7A zxaePUfq;YX2!4E+aiJ4Ii*f^8pM{wqb$D8-44p&~a3&E@^U5XoNpq$Is~*JH?LAIm zG$$kiew!Tm+}fqSc(?T!zA{nGNYcO}DO^<3o{ga3|D2h0L_QMRID74|YJcT`g?c$K zytO?g(uGEk+HsmxtI8JAcy?Ih;qQp%aow3S#c{nAmc z*MFw$rx#ixi#e?fE7d7s${%>4Y6e?6zD#V{Sd#wK0^FMUhH!-1euIuuuy2AdagD#U zBjg1uhCnjryDGE4q!iL*+#0&dVDCluNndAWm)pSGP$j}eeP_H7H^lKQ71S+EJo$j3 z7=CC&=GY62M6pjNBiMriU)5-X>lEB6@s!c>qCmb&HDyAsH zoS(+qJ*73?IF&x;m}*E9J)2T|N4x0elP#M=7n@P*Nv+UTYgA7upJ4x$$Vl`m(70a-3QfKW`F}@bE1;wP|0a#KBim(r=}`lE zyL!4t;OM*f(Sj1?(f<-t<|~%KL-ze_zPGB`TZ%a+Y?+yviF^xQ)ZccP87kKK^)m^*D)?q7apx7L#2Yr{Rcn64!$wZ7nsR^1{YU`gxe?gyx%zp=Uq!;cZi zH8jql#;4o`=o#k@V%15)Bx6Vs-|+eg!<5JzE?M=1qj)O2*_DKO6cj|%2craJWMLVj zQl(iYHC7bSRi(<_)jY%f*U>-RPj#dH8u`Glg8Dy3|Nms9qb4PL`F{-T3jEOR@?(>u zWR50QX)~?(ywW=t=u=8IE^@YGgK5RFLY8YV>qbZV8@0`J2gVKC@i!{F79%cNC%t#< zDs4N>=Aol+Bvu$49-ck2wz{>ZO?)?9*@KRNvuvtuvrdpmoWaAHzxYiF>8VAQX_g zE4%Qx?LG_{fw*6OT-3o+&$d z-YLHL8%6(*m4B8~>mS%4y9G?h$dCe73=6&)1&LJ5RQHNJ!0`Yj6x;92$@&{~yk_ub&%Q-v6i6oKJ5%rG?+Vee3xu|2g*m2Oq0{v+zzR zq$jD%r^o2_bai!^77X{ll;M%ylyT58u+7*1s&D$WQC&-2U(C+LN5{(~62L)armbnL zBWY}JevR~hOL?J2Zrqfm!BV zQCMjOHv=QfS76#?5dlh~8VT#RhIfj)izP!!s;y0Q+Ogg zKHh{_J9Zz4pJom;-x|Y?9g+m>FeTQGX+6)C(tze?V%Xs>MZgX-V(o}beDThyl!0O9 zBJ}uRl_p?^Ik9%2-l>k9uI5i5Ui{|>V4eEb{wBXzz*Q;+{D>|dQCL49aE+d zumgB$G;wyIUTBPLhu<^;b^xz8Ce9Aj-A%}LEWJg*4&YuV;_N^jC_=Vl-(S3TK#E|Z z%SF^KF0ws2jEwm5Iz}6UIDetG#F6bYWyND3Mr)Bc+fZAc$hK`}gW5)>$*AorWRvwd ape7R?Lae~rfq_9B2=@TXaAi(V^8x^j`zm1o literal 0 HcmV?d00001 From 9a70dbb51ab9a5140691c6822ee7002ddd366337 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Thu, 17 Dec 2015 12:05:43 -0500 Subject: [PATCH 141/167] Add ability to profile query and collectors Provides a new flag which can be enabled on a per-request basis. When `"profile": true` is set, the search request will execute in a mode that collects detailed timing information for query components. 
``` GET /test/test/_search { "profile": true, "query": { "match": { "foo": "bar" } } } ``` Closes #14889 Squashed commit of the following: commit a92db5723d2c61b8449bd163d2f006d12f9889ad Merge: 117dd99 3f87b08 Author: Zachary Tong Date: Thu Dec 17 09:44:10 2015 -0500 Merge remote-tracking branch 'upstream/master' into query_profiler commit 117dd9992e8014b70203c6110925643769d80e62 Merge: 9b29d68 82a64fd Author: Zachary Tong Date: Tue Dec 15 13:27:18 2015 -0500 Merge remote-tracking branch 'upstream/master' into query_profiler Conflicts: core/src/main/java/org/elasticsearch/search/SearchService.java commit 9b29d6823a71140ecd872df25ff9f7478e7fe766 Author: Zachary Tong Date: Mon Dec 14 16:13:23 2015 -0500 [TEST] Profile flag needs to be set, ensure searches go against primary only for consistency commit 4d602d8ad1f8cbc7b475450921fa3bc7d395b34f Merge: 8b48e87 7742c1e Author: Zachary Tong Date: Mon Dec 14 10:56:25 2015 -0500 Merge remote-tracking branch 'upstream/master' into query_profiler commit 8b48e876348b163ab730eeca7fa35142165b05f9 Author: Zachary Tong Date: Mon Dec 14 10:56:01 2015 -0500 Delegate straight to in.matchCost, no need for profiling commit fde3b0587911f0b5f15e779c671d0510cbd568a9 Author: Zachary Tong Date: Mon Dec 14 10:28:23 2015 -0500 Documentation tweaks, renaming build_weight -> create_weight commit 46f5e011ee23fe9bb8a1f11ceb4fa9d19fe48e2e Author: Zachary Tong Date: Mon Dec 14 10:27:52 2015 -0500 Profile TwoPhaseIterator should override matchCost() commit b59f894ddb11b2a7beebba06c4ec5583ff91a7b2 Merge: 9aa1a3a b4e0c87 Author: Zachary Tong Date: Wed Dec 9 14:23:26 2015 -0500 Merge remote-tracking branch 'upstream/master' into query_profiler commit 9aa1a3a25c34c9cd9fffaa6114c25a0ec791307d Author: Zachary Tong Date: Wed Dec 9 13:41:48 2015 -0500 Revert "Move some of the collector wrapping logic into ProfileCollectorBuilder" This reverts commit 02cc31767fb76a7ecd44a302435e93a05fb4220e. 
commit 57f7c04cea66b3f98ba2bec4879b98e4fba0b3c0 Author: Zachary Tong Date: Wed Dec 9 13:41:31 2015 -0500 Revert "Rearrange if/else to make intent clearer" This reverts commit 59b63c533fcaddcdfe4656e86a6f6c4cb1bc4a00. commit 2874791b9c9cd807113e75e38be465f3785c154e Author: Zachary Tong Date: Wed Dec 9 13:38:13 2015 -0500 Revert "Move state into ProfileCollectorBuilder" This reverts commit 0bb3ee0dd96170b06f07ec9e2435423d686a5ae6. commit 0bb3ee0dd96170b06f07ec9e2435423d686a5ae6 Author: Zachary Tong Date: Thu Dec 3 11:21:55 2015 -0500 Move state into ProfileCollectorBuilder commit 59b63c533fcaddcdfe4656e86a6f6c4cb1bc4a00 Author: Zachary Tong Date: Wed Dec 2 17:21:12 2015 -0500 Rearrange if/else to make intent clearer commit 511db0af2f3a86328028b88a6b25fa3dfbab963b Author: Zachary Tong Date: Wed Dec 2 17:12:06 2015 -0500 Rename WEIGHT -> BUILD_WEIGHT commit 02cc31767fb76a7ecd44a302435e93a05fb4220e Author: Zachary Tong Date: Wed Dec 2 17:11:22 2015 -0500 Move some of the collector wrapping logic into ProfileCollectorBuilder commit e69356d3cb4c60fa281dad36d84faa64f5c32bc4 Author: Zachary Tong Date: Mon Nov 30 15:12:35 2015 -0500 Cleanup imports commit c1b4f284f16712be60cd881f7e4a3e8175667d62 Author: Zachary Tong Date: Mon Nov 30 15:11:25 2015 -0500 Review cleanup: Make InternalProfileShardResults writeable commit 9e61c72f7e1787540f511777050a572b7d297636 Author: Zachary Tong Date: Mon Nov 30 15:01:22 2015 -0500 Review cleanup: Merge ProfileShardResult, InternalProfileShardResult. Convert to writeable commit 709184e1554f567c645690250131afe8568a5799 Author: Zachary Tong Date: Mon Nov 30 14:38:08 2015 -0500 Review cleanup: Merge ProfileResult, InternalProfileResult. 
Convert to writeable commit 7d72690c44f626c34e9c608754bc7843dd7fd8fe Author: Zachary Tong Date: Mon Nov 30 14:01:34 2015 -0500 Review cleanup: use primitive (and default) for profile flag commit 97d557388541bbd3388cdcce7d9718914d88de6d Author: Zachary Tong Date: Mon Nov 30 13:09:12 2015 -0500 Review cleanup: Use Collections.emptyMap() instead of building an empty one explicitly commit 219585b8729a8b0982e653d99eb959efd0bef84e Author: Zachary Tong Date: Mon Nov 30 13:08:12 2015 -0500 Add todo to revisit profiler architecture in the future commit b712edb2160e032ee4b2f2630fadf131a0936886 Author: Zachary Tong Date: Mon Nov 30 13:05:32 2015 -0500 Split collector serialization from timing, use writeable instead of streamable Previously, the collector timing was done in the same class that was serialized, which required leaving the collector null when serializing. Besides being a bit gross, this made it difficult to change the class to Writeable. This splits up the timing (InternalProfileCollector + ProfileCollector) and the serialization of the times (CollectorResult). CollectorResult is writeable, and also acts as the public interface. commit 6ddd77d066262d4400e3d338b11cebe7dd27ca78 Author: Zachary Tong Date: Wed Nov 25 13:15:12 2015 -0500 Remove dead code commit 06033f8a056e2121d157654a65895c82bbe93a51 Author: Zachary Tong Date: Wed Nov 25 12:49:51 2015 -0500 Review cleanup: Delegate to in.getProfilers() Note: Need to investigate how this is used exactly so we can add a test, it isn't touched by a normal inner_hits query... 
commit a77e13da21b4bad1176ca2b5d5b76034fb12802f Author: Zachary Tong Date: Wed Nov 25 11:59:58 2015 -0500 Review cleanup: collapse to single `if` statement commit e97bb6215a5ebb508b0293ac3acd60d5ae479be1 Author: Zachary Tong Date: Wed Nov 25 11:39:43 2015 -0500 Review cleanup: Return empty map instead of null for profile results Note: we still need to check for nullness in SearchPhaseController, since an empty/no-hits result won't have profiling instantiated (or any other component like aggs or suggest). Therefore QuerySearchResult.profileResults() is still @Nullable commit db8e691de2a727389378b459fa76c942572e6015 Author: Zachary Tong Date: Wed Nov 25 10:14:47 2015 -0500 Review cleanup: renaming, formatting fixes, assertions commit 9011775fe80ba22c2fd948ca64df634b4e32772d Author: Zachary Tong Date: Thu Nov 19 20:09:52 2015 -0500 [DOCS] Add missing annotation commit 4b58560b06f08d4b99b149af20916ee839baabd7 Author: Zachary Tong Date: Thu Nov 19 20:07:17 2015 -0500 [DOCS] Update documentation for new format commit f0458c58e5538ed8ec94849d4baf3250aa9ec841 Author: Adrien Grand Date: Tue Nov 17 10:14:09 2015 +0100 Reduce visibility of internal classes. commit d0a7d319098e60b028fa772bf8a99b2df9cf6146 Merge: e158070 1bdf29e Author: Adrien Grand Date: Tue Nov 17 10:09:18 2015 +0100 Merge branch 'master' into query_profiler commit e158070a48cb096551f3bb3ecdcf2b53bbc5e3c5 Author: Adrien Grand Date: Tue Nov 17 10:08:48 2015 +0100 Fix compile error due to bad html in javadocs. commit a566b5d08d659daccb087a9afbe908ec3d96cd6e Author: Zachary Tong Date: Mon Nov 16 17:48:37 2015 -0500 Remove unused collector commit 4060cd72d150cc68573dbde62ca7321c47f75703 Author: Zachary Tong Date: Mon Nov 16 17:48:10 2015 -0500 Comment cleanup commit 43137952bf74728f5f5d5a8d1bfc073e0f9fe4f9 Author: Zachary Tong Date: Mon Nov 16 17:32:06 2015 -0500 Fix negative formatted time commit 5ef3a980266326aff12d4fe380f73455ff28209f Author: Adrien Grand Date: Fri Nov 13 17:10:17 2015 +0100 Fix javadocs. 
commit 276114d29e4b17a0cc0982cfff51434f712dc59e Author: Adrien Grand Date: Fri Nov 13 16:25:23 2015 +0100 Fix: include rewrite time as well... commit 21d9e17d05487bf4903ae3d2ab6f429bece2ffef Author: Adrien Grand Date: Fri Nov 13 15:10:15 2015 +0100 Remove TODO about profiling explain. commit 105a31e8e570efb879447159c3852871f5cf7db4 Author: Adrien Grand Date: Fri Nov 13 14:59:30 2015 +0100 Fix nocommit now that the global collector is a root collector. commit 2e8fc5cf84adb1bfaba296808c329e5f982c9635 Author: Adrien Grand Date: Fri Nov 13 14:53:38 2015 +0100 Make collector wrapping more explicit/robust (and a bit less magical). commit 5e30b570b0835e1ce79a57933a31b6a2d0d58e2d Author: Adrien Grand Date: Fri Nov 13 12:44:03 2015 +0100 Simplify recording API a bit. commit 9b453afced6adc0a59ca1d67d90c28796b105185 Author: Adrien Grand Date: Fri Nov 13 10:54:25 2015 +0100 Fix serialization-related nocommits. commit ad97b200bb123d4e9255e7c8e02f7e43804057a5 Author: Adrien Grand Date: Fri Nov 13 10:46:30 2015 +0100 Fix DFS. commit a6de06986cd348a831bd45e4f524d2e14d9e03c3 Author: Adrien Grand Date: Thu Nov 12 19:29:16 2015 +0100 Remove forbidden @Test annotation. commit 4991a28e19501109af98026e14756cb25a56f4f4 Author: Adrien Grand Date: Thu Nov 12 19:25:59 2015 +0100 Limit the impact of query profiling on the SearchContext API. Rule is: we can put as much as we want in the search.profile package but should aim at touching as little as possible other areas of the code base. commit 353d8d75a5ce04d9c3908a0a63d4ca6e884c519a Author: Adrien Grand Date: Thu Nov 12 18:05:09 2015 +0100 Remove dead code. commit a3ffafb5ddbb5a2acf43403c946e5ed128f47528 Author: Adrien Grand Date: Thu Nov 12 15:30:35 2015 +0100 Remove call to forbidden String.toLowerCase() API. commit 1fa8c7a00324fa4e32bd24135ebba5ecf07606f1 Author: Adrien Grand Date: Thu Nov 12 15:30:27 2015 +0100 Fix compilation. 
commit 2067f1797e53bef0e1a8c9268956bc5fb8f8ad97 Merge: 22e631f fac472f Author: Adrien Grand Date: Thu Nov 12 15:21:12 2015 +0100 Merge branch 'master' into query_profiler commit 22e631fe6471fed19236578e97c628d5cda401a9 Author: Zachary Tong Date: Tue Nov 3 18:52:05 2015 -0500 Fix and simplify serialization of shard profile results commit 461da250809451cd2b47daf647343afbb4b327f2 Author: Zachary Tong Date: Tue Nov 3 18:32:22 2015 -0500 Remove helper methods, simpler without them commit 5687aa1c93d45416d895c2eecc0e6a6b302139f2 Author: Zachary Tong Date: Tue Nov 3 18:29:32 2015 -0500 [TESTS] Fix tests for new rewrite format commit ba9e82857fc6d4c7b72ef4d962d2102459365299 Author: Zachary Tong Date: Fri Oct 30 15:28:14 2015 -0400 Rewrites begone! Record all rewrites as a single time metric commit 5f28d7cdff9ee736651d564f71f713bf45fb1d91 Author: Zachary Tong Date: Thu Oct 29 15:36:06 2015 -0400 Merge duplicate rewrites into one entry By using the Query as the key in a map, we can easily merge rewrites together. This means the preProcess(), assertion and main query rewrites all get merged together. Downside is that rewrites of the same Query (hashcode) but in different places get lumped together. I think the simplicity of the solution is better than the slight loss in output fidelity. commit 9a601ea46bb21052746157a45dcc6de6bc350e9c Author: Zachary Tong Date: Thu Oct 29 15:28:27 2015 -0400 Allow multiple "searches" per profile (e.g. query + global agg) commit ee30217328381cd83f9e653d3a4d870c1d2bdfce Author: Zachary Tong Date: Thu Oct 29 11:29:18 2015 -0400 Update comment, add nullable annotation commit 405c6463a64e118f170959827931e8c6a1661f13 Author: Zachary Tong Date: Thu Oct 29 11:04:30 2015 -0400 remove out-dated comment commit 2819ae8f4cf1bfd5670dbd1c0e06195ae457b58f Author: Adrien Grand Date: Tue Oct 27 19:50:47 2015 +0100 Don't render children in the profiles when there are no children. 
commit 7677c2ddefef321bbe74660471603d202a4ab66f Author: Adrien Grand Date: Tue Oct 27 19:50:35 2015 +0100 Set the profiler on the ContextIndexSearcher. commit 74a4338c35dfed779adc025ec17cfd4d1c9f66f5 Author: Adrien Grand Date: Tue Oct 27 19:50:01 2015 +0100 Fix json rendering. commit 6674d5bebe187b0b0d8b424797606fdf2617dd27 Author: Adrien Grand Date: Tue Oct 27 19:20:19 2015 +0100 Revert "nocommit - profiling never enabled because setProfile() on ContextIndexSearcher never called" This reverts commit d3dc10949024342055f0d4fb7e16c7a43423bfab. commit d3dc10949024342055f0d4fb7e16c7a43423bfab Author: Zachary Tong Date: Fri Oct 23 17:20:57 2015 -0400 nocommit - profiling never enabled because setProfile() on ContextIndexSearcher never called Previously, it was enabled by using DefaultSearchContext as a third-party "proxy", but since the refactor to make it unit testable, setProfile() needs to be called explicitly. Unfortunately, this is not possible because SearchService only has access to an IndexSearcher. And it is not cast'able to a DefaultIndexSearcher. 
commit b9ba9c5d1f93b9c45e97b0a4e35da6f472c9ea53 Author: Zachary Tong Date: Fri Oct 23 16:27:00 2015 -0400 [TESTS] Fix unit tests commit cf5d1e016b2b4a583175e07c16c7152f167695ce Author: Zachary Tong Date: Fri Oct 23 16:22:34 2015 -0400 Increment token after recording a rewrite commit b7d08f64034e498533c4a81bff8727dd8ac2843e Author: Zachary Tong Date: Fri Oct 23 16:14:09 2015 -0400 Fix NPE if a top-level root doesn't have children commit e4d3b514bafe2a3a9db08438c89f0ed68628f2d6 Author: Zachary Tong Date: Fri Oct 23 16:05:47 2015 -0400 Fix NPE when profiling is disabled commit 445384fe48ed62fdd01f7fc9bf3e8361796d9593 Author: Zachary Tong Date: Fri Oct 23 16:05:37 2015 -0400 [TESTS] Fix integration tests commit b478296bb04fece827a169e7522df0a5ea7840a3 Author: Zachary Tong Date: Fri Oct 23 15:43:24 2015 -0400 Move rewrites to their own section, remove reconciliation Big commit because the structural change affected a lot of the wrapping code. Major changes: - Rewrites are now in their own section in the response - Reconciliation is gone...we don't attempt to roll the rewrites into the query tree structure - InternalProfileShardResults (plural) simply holds a Map and helps to serialize / ToXContent - InternalProfileShardResult (singular) is now the holder for all shard-level profiling details. Currently this includes query, collectors and rewrite. In the future it would hold suggest, aggs, etc - When the user requests the profiled results, they get back a Map instead of doing silly helper methods to convert to maps, etc - Shard details are baked into a string instead of serializing the object commit 24819ad094b208d0e94f17ce9c3f7c92f7414124 Author: Zachary Tong Date: Fri Oct 23 10:25:38 2015 -0400 Make Profile results immutable by removing relative_time commit bfaf095f45fed74194ef78160a8e5dcae1850f9e Author: Adrien Grand Date: Fri Oct 23 10:54:59 2015 +0200 Add nocommits. 
commit e9a128d0d26d5b383b52135ca886f2c987850179 Author: Adrien Grand Date: Fri Oct 23 10:39:37 2015 +0200 Move all profile-related classes to the same package. commit f20b7c7fdf85384ecc37701bb65310fb8c20844f Author: Adrien Grand Date: Fri Oct 23 10:33:14 2015 +0200 Reorganize code a bit to ease unit testing of ProfileCollector. commit 3261306edad6a0c70f59eaee8fe58560f61a75fd Author: Adrien Grand Date: Thu Oct 22 18:07:28 2015 +0200 Remove irrelevant nocommit. commit a6ac868dad12a2e17929878681f66dbd0948d322 Author: Adrien Grand Date: Thu Oct 22 18:06:45 2015 +0200 Make CollectorResult's reason a free-text field to ease bw compat. commit 5d0bf170781a950d08b81871cd1e403e49f3cc12 Author: Adrien Grand Date: Thu Oct 22 16:50:52 2015 +0200 Add unit tests for ProfileWeight/ProfileScorer. commit 2cd88c412c6e62252504ef69a59216adbb574ce4 Author: Adrien Grand Date: Thu Oct 22 15:55:17 2015 +0200 Rename InternalQueryProfiler to Profiler. commit 84f5718fa6779f710da129d9e0e6ff914fd85e36 Author: Adrien Grand Date: Thu Oct 22 15:53:58 2015 +0200 Merge InternalProfileBreakdown into ProfileBreakdown. commit 135168eaeb8999c8117ea25288104b0961ce9b35 Author: Adrien Grand Date: Thu Oct 22 13:56:57 2015 +0200 Make it possible to instantiate a ContextIndexSearcher without SearchContext. commit 5493fb52376b48460c4ce2dedbe00cc5f6620499 Author: Adrien Grand Date: Thu Oct 22 11:53:29 2015 +0200 Move ProfileWeight/Scorer to their own files. commit bf2d917b9dae3b32dfc29c35a7cac4ccb7556cce Author: Adrien Grand Date: Thu Oct 22 11:38:24 2015 +0200 Fix bug that caused phrase queries to fail. commit b2bb0c92c343334ec1703a221af24a1b55e36d53 Author: Adrien Grand Date: Thu Oct 22 11:36:17 2015 +0200 Parsing happens on the coordinating node now. commit 416cabb8621acb5cd8dfa77374fd23e428f52fe9 Author: Adrien Grand Date: Thu Oct 22 11:22:17 2015 +0200 Fix compilation (in particular remove guava deps). 
commit f996508645f842629d403fc2e71c1890c0e2cac9 Merge: 4616a25 bc3b91e Author: Adrien Grand Date: Thu Oct 22 10:44:38 2015 +0200 Merge branch 'master' into query_profiler commit 4616a25afffe9c24c6531028f7fccca4303d2893 Author: Zachary Tong Date: Tue Oct 20 12:11:32 2015 -0400 Make Java Count API compatible with profiling commit cbfba74e16083d719722500ac226efdb5cb2ff55 Author: Zachary Tong Date: Tue Oct 20 12:11:19 2015 -0400 Fix serialization of profile query param, NPE commit e33ffac383b03247046913da78c8a27e457fae78 Author: Zachary Tong Date: Tue Oct 20 11:17:48 2015 -0400 TestSearchContext should return null Profiler instead of exception commit 73a02d69b466dc1a5b8a5f022464d6c99e6c2ac3 Author: Zachary Tong Date: Mon Oct 19 12:07:29 2015 -0400 [DOCS] Update docs to reflect new ID format commit 36248e388c354f954349ecd498db7b66f84ce813 Author: Zachary Tong Date: Mon Oct 19 12:03:03 2015 -0400 Use the full [node][index][shard] string as profile result ID commit 5cfcc4a6a6b0bcd6ebaa7c8a2d0acc32529a80e1 Author: Zachary Tong Date: Thu Oct 15 17:51:40 2015 -0400 Add failing test for phrase matching Stack trace generated: [2015-10-15 17:50:54,438][ERROR][org.elasticsearch.search.profile] shard [[JNj7RX_oSJikcnX72aGBoA][test][2]], reason [RemoteTransportException[[node_s0][local[1]][indices:data/read/search[phase/query]]]; nested: QueryPhaseExecutionException[Query Failed [Failed to execute main query]]; nested: AssertionError[nextPosition() called more than freq() times!]; ], cause [java.lang.AssertionError: nextPosition() called more than freq() times! 
at org.apache.lucene.index.AssertingLeafReader$AssertingPostingsEnum.nextPosition(AssertingLeafReader.java:353) at org.apache.lucene.search.ExactPhraseScorer.phraseFreq(ExactPhraseScorer.java:132) at org.apache.lucene.search.ExactPhraseScorer.access$000(ExactPhraseScorer.java:27) at org.apache.lucene.search.ExactPhraseScorer$1.matches(ExactPhraseScorer.java:69) at org.elasticsearch.common.lucene.search.ProfileQuery$ProfileScorer$2.matches(ProfileQuery.java:226) at org.apache.lucene.search.ConjunctionDISI$TwoPhaseConjunctionDISI.matches(ConjunctionDISI.java:175) at org.apache.lucene.search.ConjunctionDISI$TwoPhase.matches(ConjunctionDISI.java:213) at org.apache.lucene.search.ConjunctionDISI.doNext(ConjunctionDISI.java:128) at org.apache.lucene.search.ConjunctionDISI.nextDoc(ConjunctionDISI.java:151) at org.apache.lucene.search.ConjunctionScorer.nextDoc(ConjunctionScorer.java:62) at org.elasticsearch.common.lucene.search.ProfileQuery$ProfileScorer$1.nextDoc(ProfileQuery.java:205) at org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll(Weight.java:224) at org.apache.lucene.search.Weight$DefaultBulkScorer.score(Weight.java:169) at org.apache.lucene.search.BulkScorer.score(BulkScorer.java:39) at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:795) at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:509) at org.elasticsearch.search.query.QueryPhase.execute(QueryPhase.java:347) at org.elasticsearch.search.query.QueryPhase.execute(QueryPhase.java:111) at org.elasticsearch.search.SearchService.loadOrExecuteQueryPhase(SearchService.java:366) at org.elasticsearch.search.SearchService.executeQueryPhase(SearchService.java:378) at org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryTransportHandler.messageReceived(SearchServiceTransportAction.java:368) at org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryTransportHandler.messageReceived(SearchServiceTransportAction.java:365) at 
org.elasticsearch.transport.local.LocalTransport$2.doRun(LocalTransport.java:280) at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745) commit 889fe6383370fe919aaa9f0af398e3040209e40b Author: Zachary Tong Date: Thu Oct 15 17:30:38 2015 -0400 [DOCS] More docs commit 89177965d031d84937753538b88ea5ebae2956b0 Author: Zachary Tong Date: Thu Oct 15 09:59:09 2015 -0400 Fix multi-stage rewrites to recursively find most appropriate descendant rewrite Previously, we chose the first rewrite that matched. But in situations where a query may rewrite several times, this won't build the tree correctly. Instead we need to recurse down all the rewrites until we find the most appropriate "leaf" rewrite The implementation of this is kinda gross: we recursively call getRewrittenParentToken(), which does a linear scan over the rewriteMap and tries to find rewrites with a larger token value (since we know child tokens are always larger). Can almost certainly find a better way to do this... 
commit 0b4d782b5348e5d03fd26f7d91bc4a3fbcb7f6a5 Author: Zachary Tong Date: Wed Oct 14 19:30:06 2015 -0400 [Docs] Documentation checkpoint commit 383636453f6610fcfef9070c21ae7ca11346793e Author: Zachary Tong Date: Wed Sep 16 16:02:22 2015 -0400 Comments commit a81e8f31e681be16e89ceab9ba3c3e0a018f18ef Author: Zachary Tong Date: Wed Sep 16 15:48:49 2015 -0400 [TESTS] Ensure all tests use QUERY_THEN_FETCH, DFS does not profile commit 1255c2d790d85fcb9cbb78bf2a53195138c6bc24 Author: Zachary Tong Date: Tue Sep 15 16:43:46 2015 -0400 Refactor rewrite handling to handle identical rewrites commit 85b7ec82eb0b26a6fe87266b38f5f86f9ac0c44f Author: Zachary Tong Date: Tue Sep 15 08:51:14 2015 -0400 Don't update parent when a token is added as root -- Fixes NPE commit 109d02bdbc49741a3b61e8624521669b0968b839 Author: Zachary Tong Date: Tue Sep 15 08:50:40 2015 -0400 Don't set the rewritten query if not profiling -- Fixes NPE commit 233cf5e85f6f2c39ed0a2a33d7edd3bbd40856e8 Author: Zachary Tong Date: Mon Sep 14 18:04:51 2015 -0400 Update tests to new response format commit a930b1fc19de3a329abc8ffddc6711c1246a4b15 Author: Zachary Tong Date: Mon Sep 14 18:03:58 2015 -0400 Fix serialization commit 69afdd303660510c597df9bada5531b19d134f3d Author: Zachary Tong Date: Mon Sep 14 15:11:31 2015 -0400 Comments and cleanup commit 64e7ca7f78187875378382ec5d5aa2462ff71df5 Author: Zachary Tong Date: Mon Sep 14 14:40:21 2015 -0400 Move timing into dedicated class, add proper rewrite integration commit b44ff85ddbba0a080e65f2e7cc8c50d30e95df8e Author: Zachary Tong Date: Mon Sep 14 12:00:38 2015 -0400 Checkpoint - Refactoring to use a token-based dependency tree commit 52cedd5266d6a87445c6a4cff3be8ff2087cd1b7 Author: Zachary Tong Date: Fri Sep 4 19:18:19 2015 -0400 Need to set context profiling flag before calling queryPhase.preProcess commit c524670cb1ce29b4b3a531fa2bff0c403b756f46 Author: Adrien Grand Date: Fri Sep 4 18:00:37 2015 +0200 Reduce profiling overhead a bit. 
This removes hash-table lookups everytime we start/stop a profiling clock. commit 111444ff8418737082236492b37321fc96041e09 Author: Adrien Grand Date: Fri Sep 4 16:18:59 2015 +0200 Add profiling of two-phase iterators. This is useful for eg. phrase queries or script filters, since they are typically consumed through their two-phase iterator instead of the scorer. commit f275e690459e73211bc8494c6de595c0320f4c0b Author: Adrien Grand Date: Fri Sep 4 16:03:21 2015 +0200 Some more improvements. I changed profiling to disable bulk scoring, since it makes it impossible to know where time is spent. Also I removed profiling of operations that are always fast (eg. normalization) and added nextDoc/advance. commit 3c8dcd872744de8fd76ce13b6f18f36f8de44068 Author: Adrien Grand Date: Fri Sep 4 14:39:50 2015 +0200 Remove println. commit d68304862fb38a3823aebed35a263bd9e2176c2f Author: Adrien Grand Date: Fri Sep 4 14:36:03 2015 +0200 Fix some test failures introduced by the rebase... commit 04d53ca89fb34b7a21515d770c32aaffcc513b90 Author: Adrien Grand Date: Fri Sep 4 13:57:35 2015 +0200 Reconcile conflicting changes after rebase commit fed03ec8e2989a0678685cd6c50a566cec42ea4f Author: Zachary Tong Date: Thu Aug 20 22:40:39 2015 -0400 Add Collectors to profile results Profile response element has now been re-arranged so that everything is listed per-shard to facilitate grouping elements together. 
The new `collector` element looks like this: ``` "profile": { "shards": [ { "shard_id": "keP4YFywSXWALCl4m4k24Q", "query": [...], "collector": [ { "name": "MultiCollector", "purpose": "search_multi", "time": "16.44504400ms", "relative_time": "100.0000000%", "children": [ { "name": "FilteredCollector", "purpose": "search_post_filter", "time": "4.556013000ms", "relative_time": "27.70447437%", "children": [ { "name": "SimpleTopScoreDocCollector", "purpose": "search_sorted", "time": "1.352166000ms", "relative_time": "8.222331299%", "children": [] } ] }, { "name": "BucketCollector: [[non_global_term, another_agg]]", "purpose": "aggregation", "time": "10.40379400ms", "relative_time": "63.26400829%", "children": [] }, ... ``` commit 1368b495c934be642c00f6cbf9fc875d7e6c07ff Author: Zachary Tong Date: Wed Aug 19 12:43:03 2015 -0400 Move InternalProfiler to profile package commit 53584de910db6d4a6bb374c9ebb954f204882996 Author: Zachary Tong Date: Tue Aug 18 18:34:58 2015 -0400 Only reconcile rewrite timing when rewritten query is different from original commit 9804c3b29d2107cd97f1c7e34d77171b62cb33d0 Author: Zachary Tong Date: Tue Aug 18 16:40:15 2015 -0400 Comments and cleanup commit 8e898cc7c59c0c1cc5ed576dfed8e3034ca0967f Author: Zachary Tong Date: Tue Aug 18 14:19:07 2015 -0400 [TESTS] Fix comparison test to ensure results sort identically commit f402a29001933eef29d5a62e81c8563f1c8d0969 Author: Zachary Tong Date: Tue Aug 18 14:17:59 2015 -0400 Add note about DFS being unable to profile commit d446e08d3bc91cd85b24fc908e2d82fc5739d598 Author: Zachary Tong Date: Tue Aug 18 14:17:23 2015 -0400 Implement some missing methods commit 13ca94fb86fb037a30d181b73d9296153a63d6e4 Author: Zachary Tong Date: Tue Aug 18 13:10:54 2015 -0400 [TESTS] Comments & cleanup commit c76c8c771fdeee807761c25938a642612a6ed8e7 Author: Zachary Tong Date: Tue Aug 18 13:06:08 2015 -0400 [TESTS] Fix profileMatchesRegular to handle NaN scores and nearlyEqual floats commit 
7e7a10ecd26677b2239149468e24938ce5cc18e1 Author: Zachary Tong Date: Tue Aug 18 12:22:16 2015 -0400 Move nearlyEquals() utility function to shared location commit 842222900095df4b27ff3593dbb55a42549f2697 Author: Zachary Tong Date: Tue Aug 18 12:04:35 2015 -0400 Fixup rebase conflicts commit 674f162d7704dd2034b8361358decdefce1f76ce Author: Zachary Tong Date: Mon Aug 17 15:29:35 2015 -0400 [TESTS] Update match and bool tests commit 520380a85456d7137734aed0b06a740e18c9cdec Author: Zachary Tong Date: Mon Aug 17 15:28:09 2015 -0400 Make naming consistent re: plural commit b9221501d839bb24d6db575d08e9bee34043fc65 Author: Zachary Tong Date: Mon Aug 17 15:27:39 2015 -0400 Children need to be added to list after serialization commit 05fa51df940c332fbc140517ee56e849f2d40a72 Author: Zachary Tong Date: Mon Aug 17 15:22:41 2015 -0400 Re-enable bypass for non-profiled queries commit f132204d264af77a75bd26a02d4e251a19eb411d Author: Zachary Tong Date: Mon Aug 17 15:21:14 2015 -0400 Fix serialization of QuerySearchResult, InternalProfileResult commit 27b98fd475fc2e9508c91436ef30624bdbee54ba Author: Zachary Tong Date: Mon Aug 10 17:39:17 2015 -0400 Start to add back tests, refactor Java api commit bcfc9fefd49307045108408dc160774666510e85 Author: Zachary Tong Date: Tue Aug 4 17:08:10 2015 -0400 Checkpoint commit 26a530e0101ce252450eb23e746e48c2fd1bfcae Author: Zachary Tong Date: Tue Jul 14 13:30:32 2015 -0400 Add createWeight() checkpoint commit f0dd61de809c5c13682aa213c0be65972537a0df Author: Zachary Tong Date: Mon Jul 13 12:36:27 2015 -0400 checkpoint commit 377ee8ce5729b8d388c4719913b48fae77a16686 Author: Zachary Tong Date: Wed Mar 18 10:45:01 2015 -0400 checkpoint --- .../action/search/SearchRequestBuilder.java | 8 + .../action/search/SearchResponse.java | 14 + .../percolator/PercolateContext.java | 9 +- .../elasticsearch/search/SearchService.java | 9 +- .../search/aggregations/AggregationPhase.java | 28 +- .../search/aggregations/BucketCollector.java | 6 + 
.../search/builder/SearchSourceBuilder.java | 40 +- .../controller/SearchPhaseController.java | 16 +- .../search/internal/ContextIndexSearcher.java | 57 +- .../search/internal/DefaultSearchContext.java | 14 +- .../internal/FilteredSearchContext.java | 9 +- .../internal/InternalSearchResponse.java | 44 +- .../search/internal/SearchContext.java | 8 +- .../internal/ShardSearchLocalRequest.java | 12 + .../search/internal/ShardSearchRequest.java | 11 + .../internal/ShardSearchTransportRequest.java | 10 + .../search/profile/CollectorResult.java | 156 +++++ .../profile/InternalProfileCollector.java | 135 ++++ .../profile/InternalProfileShardResults.java | 89 +++ .../search/profile/InternalProfileTree.java | 235 +++++++ .../search/profile/ProfileBreakdown.java | 113 ++++ .../search/profile/ProfileCollector.java | 94 +++ .../search/profile/ProfileResult.java | 165 +++++ .../search/profile/ProfileScorer.java | 158 +++++ .../search/profile/ProfileShardResult.java | 103 +++ .../search/profile/ProfileWeight.java | 97 +++ .../search/profile/Profiler.java | 130 ++++ .../search/profile/Profilers.java | 59 ++ .../search/query/QueryPhase.java | 68 +- .../search/query/QuerySearchResult.java | 41 ++ .../pipeline/moving/avg/MovAvgIT.java | 1 + .../search/profile/ProfileTests.java | 173 +++++ .../search/profile/QueryProfilerIT.java | 596 +++++++++++++++++ .../search/profile/RandomQueryGenerator.java | 266 ++++++++ .../test/hamcrest/DoubleMatcher.java | 45 ++ docs/reference/search.asciidoc | 2 + docs/reference/search/profile.asciidoc | 601 ++++++++++++++++++ .../elasticsearch/test/TestSearchContext.java | 12 +- 38 files changed, 3599 insertions(+), 35 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java create mode 100644 core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java create mode 100644 core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java create mode 100644 
core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java create mode 100644 core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java create mode 100644 core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java create mode 100644 core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java create mode 100644 core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java create mode 100644 core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java create mode 100644 core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java create mode 100644 core/src/main/java/org/elasticsearch/search/profile/Profiler.java create mode 100644 core/src/main/java/org/elasticsearch/search/profile/Profilers.java create mode 100644 core/src/test/java/org/elasticsearch/search/profile/ProfileTests.java create mode 100644 core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java create mode 100644 core/src/test/java/org/elasticsearch/search/profile/RandomQueryGenerator.java create mode 100644 core/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java create mode 100644 docs/reference/search/profile.asciidoc diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 442b0915e3b..52d45ec9407 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -473,6 +473,14 @@ public class SearchRequestBuilder extends ActionRequestBuilderfalse + */ + public SearchRequestBuilder setProfile(boolean profile) { + sourceBuilder().profile(profile); + return this; + } + @Override public String toString() { if (request.source() != null) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java 
b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 769e0978a71..e6681bf2b9f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -32,9 +33,12 @@ import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; +import java.util.List; +import java.util.Map; import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; import static org.elasticsearch.search.internal.InternalSearchResponse.readInternalSearchResponse; @@ -160,6 +164,16 @@ public class SearchResponse extends ActionResponse implements StatusToXContent { this.scrollId = scrollId; } + /** + * If profiling was enabled, this returns an object containing the profile results from + * each shard. 
If profiling was not enabled, this will return null + * + * @return The profile results or null + */ + public @Nullable Map> getProfileResults() { + return internalResponse.profile(); + } + static final class Fields { static final XContentBuilderString _SCROLL_ID = new XContentBuilderString("_scroll_id"); static final XContentBuilderString TOOK = new XContentBuilderString("took"); diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java index 5b09b55f8dc..8df956f2cea 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java @@ -43,7 +43,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; @@ -74,6 +73,8 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.profile.Profiler; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -139,7 +140,7 @@ public class PercolateContext extends SearchContext { this.bigArrays = bigArrays.withCircuitBreaking(); this.querySearchResult = new QuerySearchResult(0, searchShardTarget); this.engineSearcher = indexShard.acquireSearcher("percolate"); - this.searcher = new 
ContextIndexSearcher(this, engineSearcher); + this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); this.scriptService = scriptService; this.numberOfShards = request.getNumberOfShards(); this.aliasFilter = aliasFilter; @@ -748,5 +749,7 @@ public class PercolateContext extends SearchContext { } @Override - public QueryCache getQueryCache() { return indexService.cache().query();} + public Profilers getProfilers() { + throw new UnsupportedOperationException(); + } } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 00939a74cf8..29ad7ccf0a1 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.ObjectFloatHashMap; import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.ObjectSet; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -86,6 +87,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext.ScriptField; import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.internal.*; import org.elasticsearch.search.internal.SearchContext.Lifetime; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.*; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; @@ -552,7 +554,7 @@ public class SearchService extends AbstractLifecycleComponent imp Engine.Searcher engineSearcher = searcher == null ? 
indexShard.acquireSearcher("search") : searcher; - SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout); + DefaultSearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout); SearchContext.setCurrent(context); try { @@ -659,7 +661,7 @@ public class SearchService extends AbstractLifecycleComponent imp } } - private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchContextException { + private void parseSource(DefaultSearchContext context, SearchSourceBuilder source) throws SearchContextException { // nothing to parse... if (source == null) { return; @@ -715,6 +717,9 @@ public class SearchService extends AbstractLifecycleComponent imp if (source.minScore() != null) { context.minimumScore(source.minScore()); } + if (source.profile()) { + context.setProfilers(new Profilers(context.searcher())); + } context.timeoutInMillis(source.timeoutInMillis()); context.terminateAfter(source.terminateAfter()); if (source.aggregations() != null) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index 742f678f6ff..0681996e3ec 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Collector; 
import org.apache.lucene.search.Query; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.Queries; @@ -30,10 +31,13 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.profile.CollectorResult; +import org.elasticsearch.search.profile.InternalProfileCollector; import org.elasticsearch.search.query.QueryPhaseExecutionException; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -81,8 +85,13 @@ public class AggregationPhase implements SearchPhase { } context.aggregations().aggregators(aggregators); if (!collectors.isEmpty()) { - final BucketCollector collector = BucketCollector.wrap(collectors); - collector.preCollection(); + Collector collector = BucketCollector.wrap(collectors); + ((BucketCollector)collector).preCollection(); + if (context.getProfilers() != null) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_AGGREGATION, + // TODO: report on child aggs as well + Collections.emptyList()); + } context.queryCollectors().put(AggregationPhase.class, collector); } } catch (IOException e) { @@ -116,6 +125,7 @@ public class AggregationPhase implements SearchPhase { BucketCollector globalsCollector = BucketCollector.wrap(globals); Query query = Queries.newMatchAllQuery(); Query searchFilter = context.searchFilter(context.types()); + if (searchFilter != null) { BooleanQuery filtered = new BooleanQuery.Builder() .add(query, Occur.MUST) @@ -124,8 +134,20 @@ public class AggregationPhase implements SearchPhase { query = filtered; } try { + final Collector collector; + if (context.getProfilers() == null) { + collector = 
globalsCollector; + } else { + InternalProfileCollector profileCollector = new InternalProfileCollector( + globalsCollector, CollectorResult.REASON_AGGREGATION_GLOBAL, + // TODO: report on sub collectors + Collections.emptyList()); + collector = profileCollector; + // start a new profile with this collector + context.getProfilers().addProfiler().setCollector(profileCollector); + } globalsCollector.preCollection(); - context.searcher().search(query, globalsCollector); + context.searcher().search(query, collector); } catch (Exception e) { throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e); } finally { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java b/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java index ee38e2b3610..c1c1bff1adb 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.Collector; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.stream.StreamSupport; @@ -99,6 +100,11 @@ public abstract class BucketCollector implements Collector { } return false; } + + @Override + public String toString() { + return Arrays.toString(collectors); + } }; } } diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 465729ca714..3ea2d604b8a 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.builder; import com.carrotsearch.hppc.ObjectFloatHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.elasticsearch.Version; import 
org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -91,6 +92,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ public static final ParseField RESCORE_FIELD = new ParseField("rescore"); public static final ParseField STATS_FIELD = new ParseField("stats"); public static final ParseField EXT_FIELD = new ParseField("ext"); + public static final ParseField PROFILE_FIELD = new ParseField("profile"); private static final SearchSourceBuilder PROTOTYPE = new SearchSourceBuilder(); @@ -158,6 +160,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ private BytesReference ext = null; + private boolean profile = false; + + /** * Constructs a new search source builder. */ @@ -475,6 +480,22 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ return this; } + /** + * Should the query be profiled. Defaults to {@code false}. + */ + public SearchSourceBuilder profile(boolean profile) { + this.profile = profile; + return this; + } + + /** + * Return whether to profile query execution. Defaults to {@code false}. + */ + public boolean profile() { + return profile; + } + + /** + * Gets the bytes representing the rescore builders for this request. 
*/ @@ -723,6 +744,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ builder.fieldNames = fieldNames; } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) { builder.sort(parser.text()); + } else if (context.parseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) { + builder.profile = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); @@ -931,6 +954,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ builder.field(EXPLAIN_FIELD.getPreferredName(), explain); } + if (profile) { + builder.field("profile", true); + } + if (fetchSourceContext != null) { builder.field(_SOURCE_FIELD.getPreferredName(), fetchSourceContext); } @@ -1212,6 +1239,11 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ if (in.readBoolean()) { builder.ext = in.readBytesReference(); } + if (in.getVersion().onOrAfter(Version.V_2_2_0)) { + builder.profile = in.readBoolean(); + } else { + builder.profile = false; + } return builder; } @@ -1325,13 +1357,16 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ if (hasExt) { out.writeBytesReference(ext); } + if (out.getVersion().onOrAfter(Version.V_2_2_0)) { + out.writeBoolean(profile); + } } @Override public int hashCode() { return Objects.hash(aggregations, explain, fetchSourceContext, fieldDataFields, fieldNames, from, highlightBuilder, indexBoost, innerHitsBuilder, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields, - size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version); + size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version, profile); } @Override @@ -1364,6 +1399,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ && 
Objects.equals(terminateAfter, other.terminateAfter) && Objects.equals(timeoutInMillis, other.timeoutInMillis) && Objects.equals(trackScores, other.trackScores) - && Objects.equals(version, other.version); + && Objects.equals(version, other.version) + && Objects.equals(profile, other.profile); } } diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java index f76527163cb..835e6e71425 100644 --- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -43,7 +43,6 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -52,9 +51,11 @@ import org.elasticsearch.search.fetch.FetchSearchResultProvider; import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.profile.InternalProfileShardResults; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.search.profile.ProfileShardResult; import java.io.IOException; import java.util.ArrayList; @@ -410,6 +411,17 @@ public class SearchPhaseController extends AbstractComponent { } } + //Collect profile results + 
InternalProfileShardResults shardResults = null; + if (!queryResults.isEmpty() && firstResult.profileResults() != null) { + Map> profileResults = new HashMap<>(queryResults.size()); + for (AtomicArray.Entry entry : queryResults) { + String key = entry.value.queryResult().shardTarget().toString(); + profileResults.put(key, entry.value.queryResult().profileResults()); + } + shardResults = new InternalProfileShardResults(profileResults); + } + if (aggregations != null) { List pipelineAggregators = firstResult.pipelineAggregators(); if (pipelineAggregators != null) { @@ -427,7 +439,7 @@ public class SearchPhaseController extends AbstractComponent { InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore); - return new InternalSearchResponse(searchHits, aggregations, suggest, timedOut, terminatedEarly); + return new InternalSearchResponse(searchHits, aggregations, suggest, shardResults, timedOut, terminatedEarly); } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 0a9b860edb7..a7bacb64d94 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -26,6 +26,9 @@ import org.apache.lucene.search.*; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.search.profile.ProfileBreakdown; +import org.elasticsearch.search.profile.ProfileWeight; +import org.elasticsearch.search.profile.Profiler; import java.io.IOException; @@ -43,26 +46,44 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { private final Engine.Searcher engineSearcher; - public ContextIndexSearcher(SearchContext searchContext, Engine.Searcher 
searcher) { + // TODO revisit moving the profiler to inheritance or wrapping model in the future + private Profiler profiler; + + public ContextIndexSearcher(Engine.Searcher searcher, + QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) { super(searcher.reader()); in = searcher.searcher(); engineSearcher = searcher; setSimilarity(searcher.searcher().getSimilarity(true)); - setQueryCache(searchContext.getQueryCache()); - setQueryCachingPolicy(searchContext.indexShard().getQueryCachingPolicy()); + setQueryCache(queryCache); + setQueryCachingPolicy(queryCachingPolicy); } @Override public void close() { } + public void setProfiler(Profiler profiler) { + this.profiler = profiler; + } + public void setAggregatedDfs(AggregatedDfs aggregatedDfs) { this.aggregatedDfs = aggregatedDfs; } @Override public Query rewrite(Query original) throws IOException { - return in.rewrite(original); + if (profiler != null) { + profiler.startRewriteTime(); + } + + try { + return in.rewrite(original); + } finally { + if (profiler != null) { + profiler.stopAndAddRewriteTime(); + } + } } @Override @@ -72,8 +93,34 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { if (aggregatedDfs != null && needsScores) { // if scores are needed and we have dfs data then use it return super.createNormalizedWeight(query, needsScores); + } else if (profiler != null) { + // we need to use the createWeight method to insert the wrappers + return super.createNormalizedWeight(query, needsScores); + } else { + return in.createNormalizedWeight(query, needsScores); + } + } + + @Override + public Weight createWeight(Query query, boolean needsScores) throws IOException { + if (profiler != null) { + // createWeight() is called for each query in the tree, so we tell the queryProfiler + // each invocation so that it can build an internal representation of the query + // tree + ProfileBreakdown profile = profiler.getQueryBreakdown(query); + 
profile.startTime(ProfileBreakdown.TimingType.CREATE_WEIGHT); + final Weight weight; + try { + weight = super.createWeight(query, needsScores); + } finally { + profile.stopAndRecordTime(); + profiler.pollLastQuery(); + } + return new ProfileWeight(query, weight, profile); + } else { + // needs to be 'super', not 'in' in order to use aggregated DFS + return super.createWeight(query, needsScores); } - return in.createNormalizedWeight(query, needsScores); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index 1174fcdd8a9..2d3f6590629 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -58,6 +58,8 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.SearchContextHighlight; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.query.QueryPhaseExecutionException; +import org.elasticsearch.search.profile.Profiler; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -129,10 +131,10 @@ public class DefaultSearchContext extends SearchContext { private List rescore; private SearchLookup searchLookup; private volatile long keepAlive; - private ScoreDoc lastEmittedDoc; private final long originNanoTime = System.nanoTime(); private volatile long lastAccessTime = -1; private InnerHitsContext innerHitsContext; + private Profilers profilers; private final Map subPhaseContexts = new HashMap<>(); private final Map, Collector> queryCollectors = new HashMap<>(); @@ -158,7 +160,7 @@ public class DefaultSearchContext extends SearchContext { this.fetchResult = 
new FetchSearchResult(id, shardTarget); this.indexShard = indexShard; this.indexService = indexService; - this.searcher = new ContextIndexSearcher(this, engineSearcher); + this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); this.timeEstimateCounter = timeEstimateCounter; this.timeoutInMillis = timeout.millis(); } @@ -724,5 +726,11 @@ public class DefaultSearchContext extends SearchContext { } @Override - public QueryCache getQueryCache() { return indexService.cache().query();} + public Profilers getProfilers() { + return profilers; + } + + public void setProfilers(Profilers profilers) { + this.profilers = profilers; + } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 7225c7b32bd..1f04d013401 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -49,6 +48,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.SearchContextHighlight; import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import 
org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -517,8 +517,11 @@ public abstract class FilteredSearchContext extends SearchContext { } @Override - public Map, Collector> queryCollectors() { return in.queryCollectors();} + public Profilers getProfilers() { + return in.getProfilers(); + } @Override - public QueryCache getQueryCache() { return in.getQueryCache();} + public Map, Collector> queryCollectors() { return in.queryCollectors();} + } diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java index 7b73772f9da..b8255e0bb52 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java @@ -28,9 +28,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.profile.InternalProfileShardResults; +import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHits; @@ -40,7 +45,7 @@ import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHit public class InternalSearchResponse implements Streamable, ToXContent { public static InternalSearchResponse empty() { - return new InternalSearchResponse(InternalSearchHits.empty(), null, null, false, null); + return new InternalSearchResponse(InternalSearchHits.empty(), null, null, null, false, null); } private InternalSearchHits hits; @@ -49,6 +54,8 @@ public class InternalSearchResponse implements Streamable, ToXContent { private 
Suggest suggest; + private InternalProfileShardResults profileResults; + private boolean timedOut; private Boolean terminatedEarly = null; @@ -56,10 +63,12 @@ public class InternalSearchResponse implements Streamable, ToXContent { private InternalSearchResponse() { } - public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest, boolean timedOut, Boolean terminatedEarly) { + public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest, + InternalProfileShardResults profileResults, boolean timedOut, Boolean terminatedEarly) { this.hits = hits; this.aggregations = aggregations; this.suggest = suggest; + this.profileResults = profileResults; this.timedOut = timedOut; this.terminatedEarly = terminatedEarly; } @@ -84,6 +93,19 @@ public class InternalSearchResponse implements Streamable, ToXContent { return suggest; } + /** + * Returns the profile results for this search response (including all shards). 
+ * An empty map is returned if profiling was not enabled + * + * @return Profile results + */ + public Map> profile() { + if (profileResults == null) { + return Collections.emptyMap(); + } + return profileResults.getShardResults(); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { hits.toXContent(builder, params); @@ -93,6 +115,9 @@ public class InternalSearchResponse implements Streamable, ToXContent { if (suggest != null) { suggest.toXContent(builder, params); } + if (profileResults != null) { + profileResults.toXContent(builder, params); + } return builder; } @@ -114,6 +139,12 @@ public class InternalSearchResponse implements Streamable, ToXContent { timedOut = in.readBoolean(); terminatedEarly = in.readOptionalBoolean(); + + if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) { + profileResults = new InternalProfileShardResults(in); + } else { + profileResults = null; + } } @Override @@ -134,5 +165,14 @@ public class InternalSearchResponse implements Streamable, ToXContent { out.writeBoolean(timedOut); out.writeOptionalBoolean(terminatedEarly); + + if (out.getVersion().onOrAfter(Version.V_2_2_0)) { + if (profileResults == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + profileResults.writeTo(out); + } + } } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 0f61b2bc6a3..4e4e9dd5dd7 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.query.QueryCache; import 
org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -56,6 +55,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.SearchContextHighlight; import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -303,6 +303,11 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple public abstract FetchSearchResult fetchResult(); + /** + * Return a handle over the profilers for the current search request, or {@code null} if profiling is not enabled. + */ + public abstract Profilers getProfilers(); + /** * Schedule the release of a resource. The time when {@link Releasable#close()} will be called on this object * is function of the provided {@link Lifetime}. 
@@ -367,5 +372,4 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple CONTEXT } - public abstract QueryCache getQueryCache(); } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 47791aeddfa..9d15dfd5790 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -71,6 +71,8 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S private Boolean requestCache; private long nowInMillis; + private boolean profile; + ShardSearchLocalRequest() { } @@ -165,6 +167,16 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S return scroll; } + @Override + public void setProfile(boolean profile) { + this.profile = profile; + } + + @Override + public boolean isProfile() { + return profile; + } + @SuppressWarnings("unchecked") protected void innerReadFrom(StreamInput in) throws IOException { index = in.readString(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index fb631b08270..b1730b6a14e 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -59,6 +59,17 @@ public interface ShardSearchRequest extends HasContextAndHeaders { Scroll scroll(); + /** + * Sets if this shard search needs to be profiled or not + * @param profile True if the shard should be profiled + */ + void setProfile(boolean profile); + + /** + * Returns true if this shard search is being profiled or not + */ + boolean isProfile(); + /** * Returns the cache key for this shard search request, based on its content */ diff --git 
a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 279d9d6bd20..0f9c0ced411 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -150,4 +150,14 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha public BytesReference cacheKey() throws IOException { return shardSearchLocalRequest.cacheKey(); } + + @Override + public void setProfile(boolean profile) { + shardSearchLocalRequest.setProfile(profile); + } + + @Override + public boolean isProfile() { + return shardSearchLocalRequest.isProfile(); + } } diff --git a/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java b/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java new file mode 100644 index 00000000000..4949c6388d2 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +/** + * Public interface and serialization container for profiled timings of the + * Collectors used in the search. Children CollectorResult's may be + * embedded inside of a parent CollectorResult + */ +public class CollectorResult implements ToXContent, Writeable { + + public static final String REASON_SEARCH_COUNT = "search_count"; + public static final String REASON_SEARCH_TOP_HITS = "search_top_hits"; + public static final String REASON_SEARCH_TERMINATE_AFTER_COUNT = "search_terminate_after_count"; + public static final String REASON_SEARCH_POST_FILTER = "search_post_filter"; + public static final String REASON_SEARCH_MIN_SCORE = "search_min_score"; + public static final String REASON_SEARCH_MULTI = "search_multi"; + public static final String REASON_SEARCH_TIMEOUT = "search_timeout"; + public static final String REASON_AGGREGATION = "aggregation"; + public static final String REASON_AGGREGATION_GLOBAL = "aggregation_global"; + + private static final ParseField NAME = new ParseField("name"); + private static final ParseField REASON = new ParseField("reason"); + private static final ParseField TIME = new ParseField("time"); + private static final ParseField CHILDREN = new ParseField("children"); + + /** + * A more friendly representation of the Collector's class name + */ + private final String collectorName; + + /** + * A "hint" to help provide some context about this Collector + */ + private final String reason; + + /** + * The total elapsed time for this Collector + */ + 
private final Long time; + + /** + * A list of children collectors "embedded" inside this collector + */ + private List<CollectorResult> children; + + public CollectorResult(String collectorName, String reason, Long time, List<CollectorResult> children) { + this.collectorName = collectorName; + this.reason = reason; + this.time = time; + this.children = children; + } + + public CollectorResult(StreamInput in) throws IOException { + this.collectorName = in.readString(); + this.reason = in.readString(); + this.time = in.readLong(); + int size = in.readVInt(); + this.children = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + CollectorResult child = new CollectorResult(in); + this.children.add(child); + } + } + + /** + * @return the profiled time for this collector (inclusive of children) + */ + public long getTime() { + return this.time; + } + + /** + * @return a human readable "hint" about what this collector was used for + */ + public String getReason() { + return this.reason; + } + + /** + * @return the lucene class name of the collector + */ + public String getName() { + return this.collectorName; + } + + /** + * @return a list of children collectors + */ + public List<CollectorResult> getProfiledChildren() { + return children; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder = builder.startObject() + .field(NAME.getPreferredName(), toString()) + .field(REASON.getPreferredName(), reason) + .field(TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double) (getTime() / 1000000.0))); + + if (!children.isEmpty()) { + builder = builder.startArray(CHILDREN.getPreferredName()); + for (CollectorResult child : children) { + builder = child.toXContent(builder, params); + } + builder = builder.endArray(); + } + builder = builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(collectorName); + out.writeString(reason); + 
out.writeLong(time); + out.writeVInt(children.size()); + for (CollectorResult child : children) { + child.writeTo(out); + } + } + + @Override + public Object readFrom(StreamInput in) throws IOException { + return new CollectorResult(in); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java new file mode 100644 index 00000000000..132731f37c6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.LeafCollector; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * This class wraps a Lucene Collector and times the execution of: + * - setScorer() + * - collect() + * - doSetNextReader() + * - needsScores() + * + * InternalProfiler facilitates the linking of the the Collector graph + */ +public class InternalProfileCollector implements Collector { + + /** + * A more friendly representation of the Collector's class name + */ + private final String collectorName; + + /** + * A "hint" to help provide some context about this Collector + */ + private final String reason; + + /** The wrapped collector */ + private final ProfileCollector collector; + + /** + * A list of "embedded" children collectors + */ + private final List children; + + public InternalProfileCollector(Collector collector, String reason, List children) { + this.collector = new ProfileCollector(collector); + this.reason = reason; + this.collectorName = deriveCollectorName(collector); + this.children = children; + } + + /** + * @return the profiled time for this collector (inclusive of children) + */ + public long getTime() { + return collector.getTime(); + } + + /** + * @return a human readable "hint" about what this collector was used for + */ + public String getReason() { + return this.reason; + } + + /** + * @return the lucene class name of the collector + */ + public String getName() { + return this.collectorName; + } + + /** + * Creates a human-friendly representation of the Collector name. + * + * Bucket Collectors use the aggregation name in their toString() method, + * which makes the profiled output a bit nicer. 
+ * + * @param c The Collector to derive a name from + * @return A (hopefully) prettier name + */ + private String deriveCollectorName(Collector c) { + String s = c.getClass().getSimpleName(); + + // MutiCollector which wraps multiple BucketCollectors is generated + // via an anonymous class, so this corrects the lack of a name by + // asking the enclosingClass + if (s.equals("")) { + s = c.getClass().getEnclosingClass().getSimpleName(); + } + + // Aggregation collector toString()'s include the user-defined agg name + if (reason.equals(CollectorResult.REASON_AGGREGATION) || reason.equals(CollectorResult.REASON_AGGREGATION_GLOBAL)) { + s += ": [" + c.toString() + "]"; + } + return s; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + return collector.getLeafCollector(context); + } + + @Override + public boolean needsScores() { + return collector.needsScores(); + } + + public CollectorResult getCollectorTree() { + return InternalProfileCollector.doGetCollectorTree(this); + } + + private static CollectorResult doGetCollectorTree(InternalProfileCollector collector) { + List childResults = new ArrayList<>(collector.children.size()); + for (InternalProfileCollector child : collector.children) { + CollectorResult result = doGetCollectorTree(child); + childResults.add(result); + } + return new CollectorResult(collector.getName(), collector.getReason(), collector.getTime(), childResults); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java new file mode 100644 index 00000000000..2ab3b632119 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java @@ -0,0 +1,89 @@ +package org.elasticsearch.search.profile; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import 
org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.*; +import java.util.stream.Collectors; + +/** + * A container class to hold all the profile results across all shards. Internally + * holds a map of shard ID -> Profiled results + */ +public final class InternalProfileShardResults implements Writeable<InternalProfileShardResults>, ToXContent{ + + private Map<String, List<ProfileShardResult>> shardResults; + + public InternalProfileShardResults(Map<String, List<ProfileShardResult>> shardResults) { + Map<String, List<ProfileShardResult>> transformed = + shardResults.entrySet() + .stream() + .collect(Collectors.toMap( + Map.Entry::getKey, + e -> Collections.unmodifiableList(e.getValue())) + ); + this.shardResults = Collections.unmodifiableMap(transformed); + } + + public InternalProfileShardResults(StreamInput in) throws IOException { + int size = in.readInt(); + shardResults = new HashMap<>(size); + + for (int i = 0; i < size; i++) { + String key = in.readString(); + int shardResultsSize = in.readInt(); + + List<ProfileShardResult> shardResult = new ArrayList<>(shardResultsSize); + + for (int j = 0; j < shardResultsSize; j++) { + ProfileShardResult result = new ProfileShardResult(in); + shardResult.add(result); + } + shardResults.put(key, shardResult); + } + } + + public Map<String, List<ProfileShardResult>> getShardResults() { + return this.shardResults; + } + + @Override + public InternalProfileShardResults readFrom(StreamInput in) throws IOException { + return new InternalProfileShardResults(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(shardResults.size()); + for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) { + out.writeString(entry.getKey()); + out.writeInt(entry.getValue().size()); + + for (ProfileShardResult result : entry.getValue()) { + result.writeTo(out); + } + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + 
builder.startObject("profile").startArray("shards"); + + for (Map.Entry> entry : shardResults.entrySet()) { + builder.startObject().field("id",entry.getKey()).startArray("searches"); + for (ProfileShardResult result : entry.getValue()) { + builder.startObject(); + result.toXContent(builder, params); + builder.endObject(); + } + builder.endArray().endObject(); + } + + builder.endArray().endObject(); + return builder; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java new file mode 100644 index 00000000000..4bc8a85a781 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java @@ -0,0 +1,235 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.search.Query; + +import java.util.*; +import java.util.concurrent.LinkedBlockingDeque; + +/** + * This class tracks the dependency tree for queries (scoring and rewriting) and + * generates {@link ProfileBreakdown} for each node in the tree. 
It also finalizes the tree + * and returns a list of {@link ProfileResult} that can be serialized back to the client + */ +final class InternalProfileTree { + + private ArrayList<ProfileBreakdown> timings; + + /** Maps the Query to it's list of children. This is basically the dependency tree */ + private ArrayList<ArrayList<Integer>> tree; + + /** A list of the original queries, keyed by index position */ + private ArrayList<Query> queries; + + /** A list of top-level "roots". Each root can have its own tree of profiles */ + private ArrayList<Integer> roots; + + /** Rewrite time */ + private long rewriteTime; + private long rewriteScratch; + + /** A temporary stack used to record where we are in the dependency tree. Only used by scoring queries */ + private Deque<Integer> stack; + + private int currentToken = 0; + + public InternalProfileTree() { + timings = new ArrayList<>(10); + stack = new LinkedBlockingDeque<>(10); + tree = new ArrayList<>(10); + queries = new ArrayList<>(10); + roots = new ArrayList<>(10); + } + + /** + * Returns a {@link ProfileBreakdown} for a scoring query. Scoring queries (e.g. those + * that are past the rewrite phase and are now being wrapped by createWeight() ) follow + * a recursive progression. We can track the dependency tree by a simple stack + * + * The only hiccup is that the first scoring query will be identical to the last rewritten + * query, so we need to take special care to fix that + * + * @param query The scoring query we wish to profile + * @return A ProfileBreakdown for this query + */ + public ProfileBreakdown getQueryBreakdown(Query query) { + int token = currentToken; + + boolean stackEmpty = stack.isEmpty(); + + // If the stack is empty, we are a new root query + if (stackEmpty) { + + // We couldn't find a rewritten query to attach to, so just add it as a + // top-level root. This is just a precaution: it really shouldn't happen. + // We would only get here if a top-level query that never rewrites for some reason. 
+ roots.add(token); + + // Increment the token since we are adding a new node, but notably, do not + // updateParent() because this was added as a root + currentToken += 1; + stack.add(token); + + return addDependencyNode(query, token); + } + + updateParent(token); + + // Increment the token since we are adding a new node + currentToken += 1; + stack.add(token); + + return addDependencyNode(query, token); + } + + /** + * Begin timing a query for a specific Timing context + */ + public void startRewriteTime() { + assert rewriteScratch == 0; + rewriteScratch = System.nanoTime(); + } + + /** + * Halt the timing process and add the elapsed rewriting time. + * startRewriteTime() must be called for a particular context prior to calling + * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and + * nonsensical + * + * @return The elapsed time + */ + public long stopAndAddRewriteTime() { + long time = Math.max(1, System.nanoTime() - rewriteScratch); + rewriteTime += time; + rewriteScratch = 0; + return time; + } + + /** + * Helper method to add a new node to the dependency tree. + * + * Initializes a new list in the dependency tree, saves the query and + * generates a new {@link ProfileBreakdown} to track the timings + * of this query + * + * @param query The query to profile + * @param token The assigned token for this query + * @return A ProfileBreakdown to profile this query + */ + private ProfileBreakdown addDependencyNode(Query query, int token) { + + // Add a new slot in the dependency tree + tree.add(new ArrayList<>(5)); + + // Save our query for lookup later + queries.add(query); + + ProfileBreakdown queryTimings = new ProfileBreakdown(); + timings.add(token, queryTimings); + return queryTimings; + } + + /** + * Removes the last (e.g. 
most recent) value on the stack + */ + public void pollLast() { + stack.pollLast(); + } + + /** + * After the query has been run and profiled, we need to merge the flat timing map + * with the dependency graph to build a data structure that mirrors the original + * query tree + * + * @return a hierarchical representation of the profiled query tree + */ + public List<ProfileResult> getQueryTree() { + ArrayList<ProfileResult> results = new ArrayList<>(5); + for (Integer root : roots) { + results.add(doGetQueryTree(root)); + } + return results; + } + + /** + * Recursive helper to finalize a node in the dependency tree + * @param token The node we are currently finalizing + * @return A hierarchical representation of the tree inclusive of children at this level + */ + private ProfileResult doGetQueryTree(int token) { + Query query = queries.get(token); + ProfileBreakdown breakdown = timings.get(token); + Map<String, Long> timings = breakdown.toTimingMap(); + List<Integer> children = tree.get(token); + List<ProfileResult> childrenProfileResults = Collections.emptyList(); + + if (children != null) { + childrenProfileResults = new ArrayList<>(children.size()); + for (Integer child : children) { + ProfileResult childNode = doGetQueryTree(child); + childrenProfileResults.add(childNode); + } + } + + // TODO this would be better done bottom-up instead of top-down to avoid + // calculating the same times over and over...but worth the effort? 
+ long nodeTime = getNodeTime(timings, childrenProfileResults); + String queryDescription = query.getClass().getSimpleName(); + String luceneName = query.toString(); + return new ProfileResult(queryDescription, luceneName, timings, childrenProfileResults, nodeTime); + } + + public long getRewriteTime() { + return rewriteTime; + } + + /** + * Internal helper to add a child to the current parent node + * + * @param childToken The child to add to the current parent + */ + private void updateParent(int childToken) { + Integer parent = stack.peekLast(); + ArrayList<Integer> parentNode = tree.get(parent); + parentNode.add(childToken); + tree.set(parent, parentNode); + } + + /** + * Internal helper to calculate the time of a node, inclusive of children + * + * @param timings A map of breakdown timing for the node + * @param children All children profile results at this node + * @return The total time at this node, inclusive of children + */ + private static long getNodeTime(Map<String, Long> timings, List<ProfileResult> children) { + long nodeTime = 0; + for (long time : timings.values()) { + nodeTime += time; + } + + // Then add up our children + for (ProfileResult child : children) { + nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren()); + } + return nodeTime; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java new file mode 100644 index 00000000000..55ad77b6937 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * A record of timings for the various operations that may happen during query execution. + * A node's time may be composed of several internal attributes (rewriting, weighting, + * scoring, etc). + */ +public final class ProfileBreakdown { + + /** Enumeration of all supported timing types. */ + public enum TimingType { + CREATE_WEIGHT, + BUILD_SCORER, + NEXT_DOC, + ADVANCE, + MATCH, + SCORE; + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } + + /** + * The accumulated timings for this query node + */ + private final long[] timings; + + /** Scrach to store the current timing type. */ + private TimingType currentTimingType; + + /** + * The temporary scratch space for holding start-times + */ + private long scratch; + + /** Sole constructor. */ + public ProfileBreakdown() { + timings = new long[TimingType.values().length]; + } + + /** + * Begin timing a query for a specific Timing context + * @param timing The timing context being profiled + */ + public void startTime(TimingType timing) { + assert currentTimingType == null; + assert scratch == 0; + currentTimingType = timing; + scratch = System.nanoTime(); + } + + /** + * Halt the timing process and save the elapsed time. 
+ * startTime() must be called for a particular context prior to calling + * stopAndRecordTime(), otherwise the elapsed time will be negative and + * nonsensical + * + * @return The elapsed time + */ + public long stopAndRecordTime() { + long time = Math.max(1, System.nanoTime() - scratch); + timings[currentTimingType.ordinal()] += time; + currentTimingType = null; + scratch = 0L; + return time; + } + + /** Convert this record to a map from {@link TimingType} to times. */ + public Map<String, Long> toTimingMap() { + Map<String, Long> map = new HashMap<>(); + for (TimingType timingType : TimingType.values()) { + map.put(timingType.toString(), timings[timingType.ordinal()]); + } + return Collections.unmodifiableMap(map); + } + + /** + * Add other's timings into this breakdown + * @param other Another Breakdown to merge with this one + */ + public void merge(ProfileBreakdown other) { + assert(timings.length == other.timings.length); + for (int i = 0; i < timings.length; ++i) { + timings[i] += other.timings[i]; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java new file mode 100644 index 00000000000..7d7538c9117 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.FilterCollector; +import org.apache.lucene.search.FilterLeafCollector; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Scorer; + +import java.io.IOException; + +/** A collector that profiles how much time is spent calling it. */ +final class ProfileCollector extends FilterCollector { + + private long time; + + /** Sole constructor. */ + public ProfileCollector(Collector in) { + super(in); + } + + /** Return the wrapped collector. 
*/ + public Collector getDelegate() { + return in; + } + + @Override + public boolean needsScores() { + final long start = System.nanoTime(); + try { + return super.needsScores(); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + final long start = System.nanoTime(); + final LeafCollector inLeafCollector; + try { + inLeafCollector = super.getLeafCollector(context); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + return new FilterLeafCollector(inLeafCollector) { + + @Override + public void collect(int doc) throws IOException { + final long start = System.nanoTime(); + try { + super.collect(doc); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + } + + @Override + public void setScorer(Scorer scorer) throws IOException { + final long start = System.nanoTime(); + try { + super.setScorer(scorer); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + } + }; + } + + /** Return the total time spent on this collector. */ + public long getTime() { + return time; + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java new file mode 100644 index 00000000000..4c8752fdaf2 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java @@ -0,0 +1,165 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * This class is the internal representation of a profiled Query, corresponding + * to a single node in the query tree. 
It is built after the query has finished executing + * and is merely a structured representation, rather than the entity that collects the timing + * profile (see InternalProfiler for that) + * + * Each InternalProfileResult has a List of InternalProfileResults, which will contain + * "children" queries if applicable + */ +final class ProfileResult implements Writeable<ProfileResult>, ToXContent { + + private static final ParseField QUERY_TYPE = new ParseField("query_type"); + private static final ParseField LUCENE_DESCRIPTION = new ParseField("lucene"); + private static final ParseField NODE_TIME = new ParseField("time"); + private static final ParseField CHILDREN = new ParseField("children"); + private static final ParseField BREAKDOWN = new ParseField("breakdown"); + + private final String queryType; + private final String luceneDescription; + private final Map<String, Long> timings; + private final long nodeTime; + private final List<ProfileResult> children; + + public ProfileResult(String queryType, String luceneDescription, Map<String, Long> timings, List<ProfileResult> children, long nodeTime) { + this.queryType = queryType; + this.luceneDescription = luceneDescription; + this.timings = timings; + this.children = children; + this.nodeTime = nodeTime; + } + + public ProfileResult(StreamInput in) throws IOException{ + this.queryType = in.readString(); + this.luceneDescription = in.readString(); + this.nodeTime = in.readLong(); + + int timingsSize = in.readVInt(); + this.timings = new HashMap<>(timingsSize); + for (int i = 0; i < timingsSize; ++i) { + timings.put(in.readString(), in.readLong()); + } + + int size = in.readVInt(); + this.children = new ArrayList<>(size); + + for (int i = 0; i < size; i++) { + children.add(new ProfileResult(in)); + } + } + + /** + * Retrieve the lucene description of this query (e.g. the "explain" text) + */ + public String getLuceneDescription() { + return luceneDescription; + } + + /** + * Retrieve the name of the query (e.g. 
"TermQuery") + */ + public String getQueryName() { + return queryType; + } + + /** + * Returns the timing breakdown for this particular query node + */ + public Map getTimeBreakdown() { + return Collections.unmodifiableMap(timings); + } + + /** + * Returns the total time (inclusive of children) for this query node. + * + * @return elapsed time in nanoseconds + */ + public long getTime() { + return nodeTime; + } + + /** + * Returns a list of all profiled children queries + */ + public List getProfiledChildren() { + return Collections.unmodifiableList(children); + } + + @Override + public ProfileResult readFrom(StreamInput in) throws IOException { + return new ProfileResult(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(queryType); + out.writeString(luceneDescription); + out.writeLong(nodeTime); // not Vlong because can be negative + out.writeVInt(timings.size()); + for (Map.Entry entry : timings.entrySet()) { + out.writeString(entry.getKey()); + out.writeLong(entry.getValue()); + } + out.writeVInt(children.size()); + for (ProfileResult child : children) { + child.writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder = builder.startObject() + .field(QUERY_TYPE.getPreferredName(), queryType) + .field(LUCENE_DESCRIPTION.getPreferredName(), luceneDescription) + .field(NODE_TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double)(getTime() / 1000000.0))) + .field(BREAKDOWN.getPreferredName(), timings); + + if (!children.isEmpty()) { + builder = builder.startArray(CHILDREN.getPreferredName()); + for (ProfileResult child : children) { + builder = child.toXContent(builder, params); + } + builder = builder.endArray(); + } + + builder = builder.endObject(); + return builder; + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java 
b/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java new file mode 100644 index 00000000000..b0dc6f2cd4e --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java @@ -0,0 +1,158 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; + +import java.io.IOException; +import java.util.Collection; + +/** + * {@link Scorer} wrapper that will compute how much time is spent on moving + * the iterator, confirming matches and computing scores. 
+ */ +final class ProfileScorer extends Scorer { + + private final Scorer scorer; + private ProfileWeight profileWeight; + private final ProfileBreakdown profile; + + ProfileScorer(ProfileWeight w, Scorer scorer, ProfileBreakdown profile) throws IOException { + super(w); + this.scorer = scorer; + this.profileWeight = w; + this.profile = profile; + } + + @Override + public int docID() { + return scorer.docID(); + } + + @Override + public int advance(int target) throws IOException { + profile.startTime(ProfileBreakdown.TimingType.ADVANCE); + try { + return scorer.advance(target); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int nextDoc() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC); + try { + return scorer.nextDoc(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public float score() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.SCORE); + try { + return scorer.score(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int freq() throws IOException { + return scorer.freq(); + } + + @Override + public long cost() { + return scorer.cost(); + } + + @Override + public Weight getWeight() { + return profileWeight; + } + + @Override + public Collection getChildren() { + return scorer.getChildren(); + } + + @Override + public TwoPhaseIterator asTwoPhaseIterator() { + final TwoPhaseIterator in = scorer.asTwoPhaseIterator(); + if (in == null) { + return null; + } + final DocIdSetIterator inApproximation = in.approximation(); + final DocIdSetIterator approximation = new DocIdSetIterator() { + + @Override + public int advance(int target) throws IOException { + profile.startTime(ProfileBreakdown.TimingType.ADVANCE); + try { + return inApproximation.advance(target); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int nextDoc() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC); + try { + 
return inApproximation.nextDoc(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int docID() { + return inApproximation.docID(); + } + + @Override + public long cost() { + return inApproximation.cost(); + } + }; + return new TwoPhaseIterator(approximation) { + @Override + public boolean matches() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.MATCH); + try { + return in.matches(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public float matchCost() { + return in.matchCost(); + } + }; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java new file mode 100644 index 00000000000..6e005babb3c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.*; + +/** + * A container class to hold the profile results for a single shard in the request. + * Contains a list of query profiles, a collector tree and a total rewrite tree. + */ +public final class ProfileShardResult implements Writeable, ToXContent { + + private final List profileResults; + + private final CollectorResult profileCollector; + + private final long rewriteTime; + + public ProfileShardResult(List profileResults, long rewriteTime, + CollectorResult profileCollector) { + assert(profileCollector != null); + this.profileResults = profileResults; + this.profileCollector = profileCollector; + this.rewriteTime = rewriteTime; + } + + public ProfileShardResult(StreamInput in) throws IOException { + int profileSize = in.readVInt(); + profileResults = new ArrayList<>(profileSize); + for (int j = 0; j < profileSize; j++) { + profileResults.add(new ProfileResult(in)); + } + + profileCollector = new CollectorResult(in); + rewriteTime = in.readLong(); + } + + public List getQueryResults() { + return Collections.unmodifiableList(profileResults); + } + + public long getRewriteTime() { + return rewriteTime; + } + + public CollectorResult getCollectorResult() { + return profileCollector; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("query"); + for (ProfileResult p : profileResults) { + p.toXContent(builder, params); + } + builder.endArray(); + builder.field("rewrite_time", rewriteTime); + builder.startArray("collector"); + profileCollector.toXContent(builder, params); + builder.endArray(); + return 
builder; + } + + @Override + public ProfileShardResult readFrom(StreamInput in) throws IOException { + return new ProfileShardResult(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(profileResults.size()); + for (ProfileResult p : profileResults) { + p.writeTo(out); + } + profileCollector.writeTo(out); + out.writeLong(rewriteTime); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java new file mode 100644 index 00000000000..1ce5cd721fe --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BulkScorer; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; + +import java.io.IOException; +import java.util.Set; + +/** + * Weight wrapper that will compute how much time it takes to build the + * {@link Scorer} and then return a {@link Scorer} that is wrapped in + * order to compute timings as well. + */ +public final class ProfileWeight extends Weight { + + private final Weight subQueryWeight; + private final ProfileBreakdown profile; + + public ProfileWeight(Query query, Weight subQueryWeight, ProfileBreakdown profile) throws IOException { + super(query); + this.subQueryWeight = subQueryWeight; + this.profile = profile; + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + profile.startTime(ProfileBreakdown.TimingType.BUILD_SCORER); + final Scorer subQueryScorer; + try { + subQueryScorer = subQueryWeight.scorer(context); + } finally { + profile.stopAndRecordTime(); + } + if (subQueryScorer == null) { + return null; + } + + return new ProfileScorer(this, subQueryScorer, profile); + } + + @Override + public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { + // We use the default bulk scorer instead of the specialized one. The reason + // is that Lucene's BulkScorers do everything at once: finding matches, + // scoring them and calling the collector, so they make it impossible to + // see where time is spent, which is the purpose of query profiling. 
+ // The default bulk scorer will pull a scorer and iterate over matches, + // this might be a significantly different execution path for some queries + // like disjunctions, but in general this is what is done anyway + return super.bulkScorer(context); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + return subQueryWeight.explain(context, doc); + } + + @Override + public float getValueForNormalization() throws IOException { + return subQueryWeight.getValueForNormalization(); + } + + @Override + public void normalize(float norm, float topLevelBoost) { + subQueryWeight.normalize(norm, topLevelBoost); + } + + @Override + public void extractTerms(Set set) { + subQueryWeight.extractTerms(set); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profiler.java b/core/src/main/java/org/elasticsearch/search/profile/Profiler.java new file mode 100644 index 00000000000..bf0c9ec01b6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/Profiler.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.search.Query; + +import java.util.*; + +/** + * This class acts as a thread-local storage for profiling a query. It also + * builds a representation of the query tree which is constructed + * "online" as the weights are wrapped by ContextIndexSearcher. This allows us + * to know the relationship between nodes in the tree without explicitly + * walking the tree or pre-wrapping everything + * + * A Profiler is associated with every Search, not per Search-Request. E.g. a + * request may execute two searches (query + global agg). A Profiler just + * represents one of those + */ +public final class Profiler { + + private final InternalProfileTree queryTree = new InternalProfileTree(); + + /** + * The root Collector used in the search + */ + private InternalProfileCollector collector; + + public Profiler() {} + + /** Set the collector that is associated with this profiler. */ + public void setCollector(InternalProfileCollector collector) { + if (this.collector != null) { + throw new IllegalStateException("The collector can only be set once."); + } + this.collector = Objects.requireNonNull(collector); + } + + /** + * Get the {@link ProfileBreakdown} for the given query, potentially creating it if it did not exist. + * This should only be used for queries that will be undergoing scoring. Do not use it to profile the + * rewriting phase + */ + public ProfileBreakdown getQueryBreakdown(Query query) { + return queryTree.getQueryBreakdown(query); + } + + /** + * Begin timing the rewrite phase of a request. All rewrites are accumulated together into a + * single metric + */ + public void startRewriteTime() { + queryTree.startRewriteTime(); + } + + /** + * Stop recording the current rewrite and add its time to the total tally, returning the + * cumulative time so far. 
+ * + * @return cumulative rewrite time + */ + public long stopAndAddRewriteTime() { + return queryTree.stopAndAddRewriteTime(); + } + + /** + * Removes the last (e.g. most recent) query on the stack. This should only be called for scoring + * queries, not rewritten queries + */ + public void pollLastQuery() { + queryTree.pollLast(); + } + + /** + * @return a hierarchical representation of the profiled query tree + */ + public List getQueryTree() { + return queryTree.getQueryTree(); + } + + /** + * @return total time taken to rewrite all queries in this profile + */ + public long getRewriteTime() { + return queryTree.getRewriteTime(); + } + + /** + * Return the current root Collector for this search + */ + public CollectorResult getCollector() { + return collector.getCollectorTree(); + } + + /** + * Helper method to convert Profiler into InternalProfileShardResults, which can be + * serialized to other nodes, emitted as JSON, etc. + * + * @param profilers A list of Profilers to convert into InternalProfileShardResults + * @return A list of corresponding InternalProfileShardResults + */ + public static List buildShardResults(List profilers) { + List results = new ArrayList<>(profilers.size()); + for (Profiler profiler : profilers) { + ProfileShardResult result = new ProfileShardResult( + profiler.getQueryTree(), profiler.getRewriteTime(), profiler.getCollector()); + results.add(result); + } + return results; + } + + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profilers.java b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java new file mode 100644 index 00000000000..0fb7d9ac1c9 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.search.internal.ContextIndexSearcher; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** Wrapper around several {@link Profiler}s that makes management easier. */ +public final class Profilers { + + private final ContextIndexSearcher searcher; + private final List profilers; + + /** Sole constructor. This {@link Profilers} instance will initially wrap one {@link Profiler}. */ + public Profilers(ContextIndexSearcher searcher) { + this.searcher = searcher; + this.profilers = new ArrayList<>(); + addProfiler(); + } + + /** Switch to a new profile. */ + public Profiler addProfiler() { + Profiler profiler = new Profiler(); + searcher.setProfiler(profiler); + profilers.add(profiler); + return profiler; + } + + /** Get the current profiler. */ + public Profiler getCurrent() { + return profilers.get(profilers.size() - 1); + } + + /** Return the list of all created {@link Profiler}s so far. 
*/ + public List getProfilers() { + return Collections.unmodifiableList(profilers); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java index ce8836cd336..08ff849871f 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -52,13 +52,16 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.AggregationPhase; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.profile.*; import org.elasticsearch.search.rescore.RescorePhase; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.sort.SortParseElement; import org.elasticsearch.search.sort.TrackScoresParseElement; import org.elasticsearch.search.suggest.SuggestPhase; +import java.util.AbstractList; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -124,6 +127,11 @@ public class QueryPhase implements SearchPhase { } suggestPhase.execute(searchContext); aggregationPhase.execute(searchContext); + + if (searchContext.getProfilers() != null) { + List shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers()); + searchContext.queryResult().profileResults(shardResults); + } } private static boolean returnsDocsInOrder(Query query, Sort sort) { @@ -147,6 +155,7 @@ public class QueryPhase implements SearchPhase { QuerySearchResult queryResult = searchContext.queryResult(); queryResult.searchTimedOut(false); + final boolean doProfile = searchContext.getProfilers() != null; final SearchType searchType = searchContext.searchType(); boolean rescore = false; try { @@ -162,9 +171,13 @@ public class QueryPhase implements SearchPhase { Callable topDocsCallable; 
assert query == searcher.rewrite(query); // already rewritten + if (searchContext.size() == 0) { // no matter what the value of from is final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); collector = totalHitCountCollector; + if (searchContext.getProfilers() != null) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_COUNT, Collections.emptyList()); + } topDocsCallable = new Callable() { @Override public TopDocs call() throws Exception { @@ -219,6 +232,9 @@ public class QueryPhase implements SearchPhase { topDocsCollector = TopScoreDocCollector.create(numDocs, lastEmittedDoc); } collector = topDocsCollector; + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TOP_HITS, Collections.emptyList()); + } topDocsCallable = new Callable() { @Override public TopDocs call() throws Exception { @@ -254,27 +270,57 @@ public class QueryPhase implements SearchPhase { final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER; if (terminateAfterSet) { + final Collector child = collector; // throws Lucene.EarlyTerminationException when given count is reached collector = Lucene.wrapCountBasedEarlyTerminatingCollector(collector, searchContext.terminateAfter()); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT, + Collections.singletonList((InternalProfileCollector) child)); + } } if (searchContext.parsedPostFilter() != null) { + final Collector child = collector; // this will only get applied to the actual search collector and not // to any scoped collectors, also, it will only be applied to the main collector // since that is where the filter should only work final Weight filterWeight = searcher.createNormalizedWeight(searchContext.parsedPostFilter().query(), false); collector = new FilteredCollector(collector, filterWeight); + if (doProfile) { + 
collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_POST_FILTER, + Collections.singletonList((InternalProfileCollector) child)); + } } // plug in additional collectors, like aggregations - List allCollectors = new ArrayList<>(); - allCollectors.add(collector); - allCollectors.addAll(searchContext.queryCollectors().values()); - collector = MultiCollector.wrap(allCollectors); + final List subCollectors = new ArrayList<>(); + subCollectors.add(collector); + subCollectors.addAll(searchContext.queryCollectors().values()); + collector = MultiCollector.wrap(subCollectors); + if (doProfile && collector instanceof InternalProfileCollector == false) { + // When there is a single collector to wrap, MultiCollector returns it + // directly, so only wrap in the case that there are several sub collectors + final List children = new AbstractList() { + @Override + public InternalProfileCollector get(int index) { + return (InternalProfileCollector) subCollectors.get(index); + } + @Override + public int size() { + return subCollectors.size(); + } + }; + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MULTI, children); + } // apply the minimum score after multi collector so we filter aggs as well if (searchContext.minimumScore() != null) { + final Collector child = collector; collector = new MinimumScoreCollector(collector, searchContext.minimumScore()); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MIN_SCORE, + Collections.singletonList((InternalProfileCollector) child)); + } } if (collector.getClass() == TotalHitCountCollector.class) { @@ -319,13 +365,21 @@ public class QueryPhase implements SearchPhase { final boolean timeoutSet = searchContext.timeoutInMillis() != SearchService.NO_TIMEOUT.millis(); if (timeoutSet && collector != null) { // collector might be null if no collection is actually needed + final Collector child = collector; // TODO: change to use our 
own counter that uses the scheduler in ThreadPool // throws TimeLimitingCollector.TimeExceededException when timeout has reached collector = Lucene.wrapTimeLimitingCollector(collector, searchContext.timeEstimateCounter(), searchContext.timeoutInMillis()); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TIMEOUT, + Collections.singletonList((InternalProfileCollector) child)); + } } try { if (collector != null) { + if (doProfile) { + searchContext.getProfilers().getCurrent().setCollector((InternalProfileCollector) collector); + } searcher.search(query, collector); } } catch (TimeLimitingCollector.TimeExceededException e) { @@ -343,7 +397,13 @@ public class QueryPhase implements SearchPhase { queryResult.topDocs(topDocsCallable.call()); + if (searchContext.getProfilers() != null) { + List shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers()); + searchContext.queryResult().profileResults(shardResults); + } + return rescore; + } catch (Throwable e) { throw new QueryPhaseExecutionException(searchContext, "Failed to execute main query", e); } diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 7f8d12a9c90..9223eb5a82d 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -20,6 +20,8 @@ package org.elasticsearch.search.query; import org.apache.lucene.search.TopDocs; +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; 
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; +import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; @@ -53,6 +56,7 @@ public class QuerySearchResult extends QuerySearchResultProvider { private Suggest suggest; private boolean searchTimedOut; private Boolean terminatedEarly = null; + private List profileShardResults; public QuerySearchResult() { @@ -120,6 +124,22 @@ public class QuerySearchResult extends QuerySearchResultProvider { this.aggregations = aggregations; } + /** + * Returns the profiled results for this search, or potentially null if result was empty + * @return The profiled results, or null + */ + public @Nullable List profileResults() { + return profileShardResults; + } + + /** + * Sets the finalized profiling results for this query + * @param shardResults The finalized profile + */ + public void profileResults(List shardResults) { + this.profileShardResults = shardResults; + } + public List pipelineAggregators() { return pipelineAggregators; } @@ -191,6 +211,15 @@ public class QuerySearchResult extends QuerySearchResultProvider { } searchTimedOut = in.readBoolean(); terminatedEarly = in.readOptionalBoolean(); + + if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) { + int profileSize = in.readVInt(); + profileShardResults = new ArrayList<>(profileSize); + for (int i = 0; i < profileSize; i++) { + ProfileShardResult result = new ProfileShardResult(in); + profileShardResults.add(result); + } + } } @Override @@ -229,5 +258,17 @@ public class QuerySearchResult extends QuerySearchResultProvider { } out.writeBoolean(searchTimedOut); out.writeOptionalBoolean(terminatedEarly); + + if (out.getVersion().onOrAfter(Version.V_2_2_0)) { + if (profileShardResults == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + 
out.writeVInt(profileShardResults.size()); + for (ProfileShardResult shardResult : profileShardResults) { + shardResult.writeTo(out); + } + } + } } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java index 90d4437fcea..207b626409b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java @@ -57,6 +57,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.min; import static org.elasticsearch.search.aggregations.AggregationBuilders.range; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.movingAvg; +import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; diff --git a/core/src/test/java/org/elasticsearch/search/profile/ProfileTests.java b/core/src/test/java/org/elasticsearch/search/profile/ProfileTests.java new file mode 100644 index 00000000000..83f6efaa150 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/profile/ProfileTests.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.RandomApproximationQuery; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.TestUtil; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.test.ESTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public class ProfileTests extends ESTestCase { + + static Directory dir; + static IndexReader reader; + static ContextIndexSearcher searcher; + + @BeforeClass + public static void before() throws IOException { + dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); + final int numDocs = 
TestUtil.nextInt(random(), 1, 20); + for (int i = 0; i < numDocs; ++i) { + final int numHoles = random().nextInt(5); + for (int j = 0; j < numHoles; ++j) { + w.addDocument(new Document()); + } + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + w.addDocument(doc); + } + reader = w.getReader(); + w.close(); + Engine.Searcher engineSearcher = new Engine.Searcher("test", new IndexSearcher(reader)); + searcher = new ContextIndexSearcher(engineSearcher, IndexSearcher.getDefaultQueryCache(), MAYBE_CACHE_POLICY); + } + + @AfterClass + public static void after() throws IOException { + IOUtils.close(reader, dir); + dir = null; + reader = null; + searcher = null; + } + + public void testBasic() throws IOException { + Profiler profiler = new Profiler(); + searcher.setProfiler(profiler); + Query query = new TermQuery(new Term("foo", "bar")); + searcher.search(query, 1); + List results = profiler.getQueryTree(); + assertEquals(1, results.size()); + Map breakdown = results.get(0).getTimeBreakdown(); + assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), equalTo(0L)); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testNoScoring() throws IOException { + Profiler profiler = new Profiler(); + searcher.setProfiler(profiler); + Query query = new TermQuery(new Term("foo", "bar")); + searcher.search(query, 1, Sort.INDEXORDER); // 
scores are not needed + List results = profiler.getQueryTree(); + assertEquals(1, results.size()); + Map breakdown = results.get(0).getTimeBreakdown(); + assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), equalTo(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), equalTo(0L)); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testUseIndexStats() throws IOException { + Profiler profiler = new Profiler(); + searcher.setProfiler(profiler); + Query query = new TermQuery(new Term("foo", "bar")); + searcher.count(query); // will use index stats + List results = profiler.getQueryTree(); + assertEquals(0, results.size()); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testApproximations() throws IOException { + Profiler profiler = new Profiler(); + Engine.Searcher engineSearcher = new Engine.Searcher("test", new IndexSearcher(reader)); + // disable query caching since we want to test approximations, which won't + // be exposed on a cached entry + ContextIndexSearcher searcher = new ContextIndexSearcher(engineSearcher, null, MAYBE_CACHE_POLICY); + searcher.setProfiler(profiler); + Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()); + searcher.count(query); + List results = profiler.getQueryTree(); + assertEquals(1, results.size()); + Map breakdown = results.get(0).getTimeBreakdown(); + 
assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), equalTo(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), greaterThan(0L)); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + + } + + public void testCollector() throws IOException { + TotalHitCountCollector collector = new TotalHitCountCollector(); + ProfileCollector profileCollector = new ProfileCollector(collector); + assertEquals(0, profileCollector.getTime()); + final LeafCollector leafCollector = profileCollector.getLeafCollector(reader.leaves().get(0)); + assertThat(profileCollector.getTime(), greaterThan(0L)); + long time = profileCollector.getTime(); + leafCollector.setScorer(Lucene.illegalScorer("dummy scorer")); + assertThat(profileCollector.getTime(), greaterThan(time)); + time = profileCollector.getTime(); + leafCollector.collect(0); + assertThat(profileCollector.getTime(), greaterThan(time)); + } +} diff --git a/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java b/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java new file mode 100644 index 00000000000..bb33364a751 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java @@ -0,0 +1,596 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.action.search.*; +import org.elasticsearch.search.SearchHit; +import org.apache.lucene.util.English; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.index.query.*; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.search.profile.RandomQueryGenerator.randomQueryBuilder; +import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual; +import static org.hamcrest.Matchers.*; + + +public class QueryProfilerIT extends ESIntegTestCase { + + /** + * This test simply checks to make sure nothing crashes. 
Test indexes 100-150 documents, + * constructs 20-100 random queries and tries to profile them + */ + public void testProfileQuery() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + List stringFields = Arrays.asList("field1"); + List numericFields = Arrays.asList("field2"); + + indexRandom(true, docs); + + refresh(); + int iters = between(20, 100); + for (int i = 0; i < iters; i++) { + QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3); + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + for (Map.Entry> shard : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shard.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + + } + } + + /** + * This test generates 1-10 random queries and executes a profiled and non-profiled + * search for each query. 
It then does some basic sanity checking of score and hits + * to make sure the profiling doesn't interfere with the hits being returned + */ + public void testProfileMatchesRegular() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + List stringFields = Arrays.asList("field1"); + List numericFields = Arrays.asList("field2"); + + indexRandom(true, docs); + + refresh(); + int iters = between(1, 10); + for (int i = 0; i < iters; i++) { + QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3); + logger.info(q.toString()); + + + SearchRequestBuilder vanilla = client().prepareSearch("test") + .setQuery(q) + .setProfile(false) + .addSort("_score", SortOrder.DESC) + .addSort("_uid", SortOrder.ASC) + .setPreference("_primary") + .setSearchType(SearchType.QUERY_THEN_FETCH); + + SearchRequestBuilder profile = client().prepareSearch("test") + .setQuery(q) + .setProfile(true) + .addSort("_score", SortOrder.DESC) + .addSort("_uid", SortOrder.ASC) + .setPreference("_primary") + .setSearchType(SearchType.QUERY_THEN_FETCH); + + MultiSearchResponse.Item[] responses = client().prepareMultiSearch() + .add(vanilla) + .add(profile) + .execute().actionGet().getResponses(); + + SearchResponse vanillaResponse = responses[0].getResponse(); + SearchResponse profileResponse = responses[1].getResponse(); + + float vanillaMaxScore = vanillaResponse.getHits().getMaxScore(); + float profileMaxScore = profileResponse.getHits().getMaxScore(); + if (Float.isNaN(vanillaMaxScore)) { + assertTrue("Vanilla maxScore is NaN but Profile is not [" + profileMaxScore + "]", + Float.isNaN(profileMaxScore)); + } else { + assertTrue("Profile maxScore of [" + profileMaxScore + "] is not close to 
Vanilla maxScore [" + vanillaMaxScore + "]", + nearlyEqual(vanillaMaxScore, profileMaxScore, 0.001)); + } + + assertThat("Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits [" + vanillaResponse.getHits().totalHits() + "]", + vanillaResponse.getHits().getTotalHits(), equalTo(profileResponse.getHits().getTotalHits())); + + SearchHit[] vanillaHits = vanillaResponse.getHits().getHits(); + SearchHit[] profileHits = profileResponse.getHits().getHits(); + + for (int j = 0; j < vanillaHits.length; j++) { + assertThat("Profile hit #" + j + " has a different ID from Vanilla", + vanillaHits[j].getId(), equalTo(profileHits[j].getId())); + } + + } + } + + /** + * This test verifies that the output is reasonable for a simple, non-nested query + */ + public void testSimpleMatch() throws Exception { + createIndex("test"); + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + ensureGreen(); + + QueryBuilder q = QueryBuilders.matchQuery("field1", "one"); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + Map> p = resp.getProfileResults(); + assertNotNull(p); + + for (Map.Entry> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertEquals(result.getQueryName(), "TermQuery"); + assertEquals(result.getLuceneDescription(), "field1:one"); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), 
not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + /** + * This test verifies that the output is reasonable for a nested query + */ + public void testBool() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + + QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one")).must(QueryBuilders.matchQuery("field1", "two")); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + Map> p = resp.getProfileResults(); + assertNotNull(p); + + for (Map.Entry> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertEquals(result.getQueryName(), "BooleanQuery"); + assertEquals(result.getLuceneDescription(), "+field1:one +field1:two"); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + assertEquals(result.getProfiledChildren().size(), 2); + + // Check the children + List children = result.getProfiledChildren(); + assertEquals(children.size(), 2); + + ProfileResult childProfile = children.get(0); + assertEquals(childProfile.getQueryName(), "TermQuery"); + assertEquals(childProfile.getLuceneDescription(), "field1:one"); + assertThat(childProfile.getTime(), greaterThan(0L)); + assertNotNull(childProfile.getTimeBreakdown()); + assertEquals(childProfile.getProfiledChildren().size(), 0); + + childProfile = children.get(1); + assertEquals(childProfile.getQueryName(), "TermQuery"); + 
assertEquals(childProfile.getLuceneDescription(), "field1:two"); + assertThat(childProfile.getTime(), greaterThan(0L)); + assertNotNull(childProfile.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + + + } + + /** + * Tests a boolean query with no children clauses + */ + public void testEmptyBool() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boolQuery(); + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + /** + * Tests a series of three nested boolean queries with a single "leaf" match query. 
+ * The rewrite process will "collapse" this down to a single bool, so this tests to make sure + * nothing catastrophic happens during that fairly substantial rewrite + */ + public void testCollapsingBool() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one")))); + + + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testBoosting() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); 
+ } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boostingQuery(QueryBuilders.matchQuery("field1", "one"), QueryBuilders.matchQuery("field1", "two")) + .boost(randomFloat()) + .negativeBoost(randomFloat()); + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testDisMaxRange() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.disMaxQuery() + .boost(0.33703882f) + .add(QueryBuilders.rangeQuery("field2").from(null).to(73).includeLower(true).includeUpper(true)); + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry> 
shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testRange() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5); + + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testPhrase() throws Exception { + createIndex("test"); + 
ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i) + " " + English.intToEnglish(i+1), + "field2", i + ); + } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.matchPhraseQuery("field1", "one two"); + + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setIndices("test") + .setTypes("type1") + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + if (resp.getShardFailures().length > 0) { + for (ShardSearchFailure f : resp.getShardFailures()) { + logger.error(f.toString()); + } + fail(); + } + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + /** + * This test makes sure no profile results are returned when profiling is disabled + */ + public void testNoProfile() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + 
indexRandom(true, docs); + refresh(); + QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5); + + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch().setQuery(q).setProfile(false).execute().actionGet(); + assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); + } + +} + diff --git a/core/src/test/java/org/elasticsearch/search/profile/RandomQueryGenerator.java b/core/src/test/java/org/elasticsearch/search/profile/RandomQueryGenerator.java new file mode 100644 index 00000000000..fb8cd40ce52 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/profile/RandomQueryGenerator.java @@ -0,0 +1,266 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.util.English; +import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.index.query.*; + +import java.util.ArrayList; +import java.util.List; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.*; +import static org.junit.Assert.assertTrue; + + +public class RandomQueryGenerator { + public static QueryBuilder randomQueryBuilder(List stringFields, List numericFields, int numDocs, int depth) { + assertTrue("Must supply at least one string field", stringFields.size() > 0); + assertTrue("Must supply at least one numeric field", numericFields.size() > 0); + + // If depth is exhausted, or 50% of the time return a terminal + // Helps limit ridiculously large compound queries + if (depth == 0 || randomBoolean()) { + return randomTerminalQuery(stringFields, numericFields, numDocs); + } + + switch (randomIntBetween(0,5)) { + case 0: + return randomTerminalQuery(stringFields, numericFields, numDocs); + case 1: + return QueryBuilders.boolQuery().must(randomQueryBuilder(stringFields, numericFields, numDocs, depth -1)) + .filter(randomQueryBuilder(stringFields, numericFields, numDocs, depth -1)); + case 2: + return randomBoolQuery(stringFields, numericFields, numDocs, depth); + case 3: + // disabled for now because of https://issues.apache.org/jira/browse/LUCENE-6781 + //return randomBoostingQuery(stringFields, numericFields, numDocs, depth); + case 4: + return randomConstantScoreQuery(stringFields, numericFields, numDocs, depth); + case 5: + return randomDisMaxQuery(stringFields, numericFields, numDocs, depth); + default: + return randomTerminalQuery(stringFields, numericFields, numDocs); + } + } + + private static QueryBuilder randomTerminalQuery(List stringFields, List numericFields, int numDocs) { + switch (randomIntBetween(0,6)) { + case 0: + return randomTermQuery(stringFields, numDocs); + case 1: + return randomTermsQuery(stringFields, numDocs); + case 2: + return 
randomRangeQuery(numericFields, numDocs); + case 3: + return QueryBuilders.matchAllQuery(); + case 4: + return randomCommonTermsQuery(stringFields, numDocs); + case 5: + return randomFuzzyQuery(stringFields); + case 6: + return randomIDsQuery(); + default: + return randomTermQuery(stringFields, numDocs); + } + } + + private static String randomQueryString(int max) { + StringBuilder qsBuilder = new StringBuilder(); + + for (int i = 0; i < max; i++) { + qsBuilder.append(English.intToEnglish(randomInt(max))); + qsBuilder.append(" "); + } + + return qsBuilder.toString().trim(); + } + + private static String randomField(List fields) { + return fields.get(randomInt(fields.size() - 1)); + } + + + + private static QueryBuilder randomTermQuery(List fields, int numDocs) { + return QueryBuilders.termQuery(randomField(fields), randomQueryString(1)); + } + + private static QueryBuilder randomTermsQuery(List fields, int numDocs) { + int numTerms = randomInt(numDocs); + ArrayList terms = new ArrayList<>(numTerms); + + for (int i = 0; i < numTerms; i++) { + terms.add(randomQueryString(1)); + } + + return QueryBuilders.termsQuery(randomField(fields), terms); + } + + private static QueryBuilder randomRangeQuery(List fields, int numDocs) { + QueryBuilder q = QueryBuilders.rangeQuery(randomField(fields)); + + if (randomBoolean()) { + ((RangeQueryBuilder)q).from(randomIntBetween(0, numDocs / 2 - 1)); + } + if (randomBoolean()) { + ((RangeQueryBuilder)q).to(randomIntBetween(numDocs / 2, numDocs)); + } + + return q; + } + + private static QueryBuilder randomBoolQuery(List stringFields, List numericFields, int numDocs, int depth) { + QueryBuilder q = QueryBuilders.boolQuery(); + int numClause = randomIntBetween(0,5); + for (int i = 0; i < numClause; i++) { + ((BoolQueryBuilder)q).must(randomQueryBuilder(stringFields, numericFields,numDocs, depth -1)); + } + + numClause = randomIntBetween(0,5); + for (int i = 0; i < numClause; i++) { + 
((BoolQueryBuilder)q).should(randomQueryBuilder(stringFields, numericFields,numDocs, depth -1)); + } + + numClause = randomIntBetween(0,5); + for (int i = 0; i < numClause; i++) { + ((BoolQueryBuilder)q).mustNot(randomQueryBuilder(stringFields, numericFields, numDocs, depth -1)); + } + + return q; + } + + private static QueryBuilder randomBoostingQuery(List stringFields, List numericFields, int numDocs, int depth) { + return QueryBuilders.boostingQuery( + randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1), + randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1)) + .boost(randomFloat()) + .negativeBoost(randomFloat()); + } + + private static QueryBuilder randomConstantScoreQuery(List stringFields, List numericFields, int numDocs, int depth) { + return QueryBuilders.constantScoreQuery(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1)); + } + + private static QueryBuilder randomCommonTermsQuery(List fields, int numDocs) { + int numTerms = randomInt(numDocs); + + QueryBuilder q = QueryBuilders.commonTermsQuery(randomField(fields), randomQueryString(numTerms)); + if (randomBoolean()) { + ((CommonTermsQueryBuilder)q).boost(randomFloat()); + } + + if (randomBoolean()) { + ((CommonTermsQueryBuilder)q).cutoffFrequency(randomFloat()); + } + + if (randomBoolean()) { + ((CommonTermsQueryBuilder)q).highFreqMinimumShouldMatch(Integer.toString(randomInt(numTerms))) + .highFreqOperator(randomBoolean() ? Operator.AND : Operator.OR); + } + + if (randomBoolean()) { + ((CommonTermsQueryBuilder)q).lowFreqMinimumShouldMatch(Integer.toString(randomInt(numTerms))) + .lowFreqOperator(randomBoolean() ? 
Operator.AND : Operator.OR); + } + + return q; + } + + private static QueryBuilder randomFuzzyQuery(List fields) { + + QueryBuilder q = QueryBuilders.fuzzyQuery(randomField(fields), randomQueryString(1)); + + if (randomBoolean()) { + ((FuzzyQueryBuilder)q).boost(randomFloat()); + } + + if (randomBoolean()) { + switch (randomIntBetween(0, 4)) { + case 0: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.AUTO); + break; + case 1: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.ONE); + break; + case 2: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.TWO); + break; + case 3: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.ZERO); + break; + case 4: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.fromEdits(randomIntBetween(0,2))); + break; + default: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.AUTO); + break; + } + } + + if (randomBoolean()) { + ((FuzzyQueryBuilder)q).maxExpansions(Math.abs(randomInt())); + } + + if (randomBoolean()) { + ((FuzzyQueryBuilder)q).prefixLength(Math.abs(randomInt())); + } + + if (randomBoolean()) { + ((FuzzyQueryBuilder)q).transpositions(randomBoolean()); + } + + return q; + } + + private static QueryBuilder randomDisMaxQuery(List stringFields, List numericFields, int numDocs, int depth) { + QueryBuilder q = QueryBuilders.disMaxQuery(); + + int numClauses = randomIntBetween(1, 10); + for (int i = 0; i < numClauses; i++) { + ((DisMaxQueryBuilder)q).add(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1)); + } + + if (randomBoolean()) { + ((DisMaxQueryBuilder)q).boost(randomFloat()); + } + + if (randomBoolean()) { + ((DisMaxQueryBuilder)q).tieBreaker(randomFloat()); + } + + return q; + } + + private static QueryBuilder randomIDsQuery() { + QueryBuilder q = QueryBuilders.idsQuery(); + + int numIDs = randomInt(100); + for (int i = 0; i < numIDs; i++) { + ((IdsQueryBuilder)q).addIds(String.valueOf(randomInt())); + } + + if (randomBoolean()) { + ((IdsQueryBuilder)q).boost(randomFloat()); + } + + return q; + } +} \ No newline at end of file diff 
--git a/core/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java b/core/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java new file mode 100644 index 00000000000..de275eaffca --- /dev/null +++ b/core/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.hamcrest; + + +public class DoubleMatcher { + + /** + * Better floating point comparisons courtesy of https://github.com/brazzy/floating-point-gui.de + * + * Snippet adapted to use doubles instead of floats + */ + public static boolean nearlyEqual(double a, double b, double epsilon) { + final double absA = Math.abs(a); + final double absB = Math.abs(b); + final double diff = Math.abs(a - b); + + if (a == b) { // shortcut, handles infinities + return true; + } else if (a == 0 || b == 0 || diff < Double.MIN_NORMAL) { + // a or b is zero or both are extremely close to it + // relative error is less meaningful here + return diff < (epsilon * Double.MIN_NORMAL); + } else { // use relative error + return diff / Math.min((absA + absB), Double.MAX_VALUE) < epsilon; + } + } +} diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 2d8a1f8bc9a..da7d2e5ee4b 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -95,6 +95,8 @@ include::search/validate.asciidoc[] include::search/explain.asciidoc[] +include::search/profile.asciidoc[] + include::search/percolate.asciidoc[] include::search/field-stats.asciidoc[] diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc new file mode 100644 index 00000000000..3d701ae5198 --- /dev/null +++ b/docs/reference/search/profile.asciidoc @@ -0,0 +1,601 @@ +[[search-profile]] +== Profile API + +coming[2.2.0] + +experimental[] + +The Profile API provides detailed timing information about the execution of individual components +in a query. It gives the user insight into how queries are executed at a low level so that +the user can understand why certain queries are slow, and take steps to improve their slow queries. + +The output from the Profile API is *very* verbose, especially for complicated queries executed across +many shards. 
Pretty-printing the response is recommended to help understand the output + +[NOTE] +--------------------- +The details provided by the Profile API directly expose Lucene class names and concepts, which means +that complete interpretation of the results require fairly advanced knowledge of Lucene. This +page attempts to give a crash-course in how Lucene executes queries so that you can use the Profile API to successfully +diagnose and debug queries, but it is only an overview. For complete understanding, please refer +to Lucene's documentation and, in places, the code. + +With that said, a complete understanding is often not required to fix a slow query. It is usually +sufficient to see that a particular component of a query is slow, and not necessarily understand why +the `advance` phase of that query is the cause, for example. +--------------------- + +[float] +=== Usage + +Any `_search` request can be profiled by adding a top-level `profile` parameter: + +[source,js] +-------------------------------------------------- +curl -XGET 'localhost:9200/_search' -d '{ + "profile": true,<1> + "query" : { + "match" : { "message" : "search test" } + } +} +-------------------------------------------------- +<1> Setting the top-level `profile` parameter to `true` will enable profiling +for the search + +This will yield the following result: + +[source,js] +-------------------------------------------------- +{ + "took": 25, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "hits": { + "total": 1, + "max_score": 1, + "hits": [ ... 
] <1> + }, + "profile": { + "shards": [ + { + "id": "[htuC6YnSSSmKFq5UBt0YMA][test][0]", + "searches": [ + { + "query": [ + { + "query_type": "BooleanQuery", + "lucene": "message:search message:test", + "time": "15.52889800ms", + "breakdown": { + "score": 0, + "next_doc": 24495, + "match": 0, + "create_weight": 8488388, + "build_scorer": 7016015, + "advance": 0 + }, + "children": [ + { + "query_type": "TermQuery", + "lucene": "message:search", + "time": "4.938855000ms", + "breakdown": { + "score": 0, + "next_doc": 18332, + "match": 0, + "create_weight": 2945570, + "build_scorer": 1974953, + "advance": 0 + } + }, + { + "query_type": "TermQuery", + "lucene": "message:test", + "time": "0.5016660000ms", + "breakdown": { + "score": 0, + "next_doc": 0, + "match": 0, + "create_weight": 170534, + "build_scorer": 331132, + "advance": 0 + } + } + ] + } + ], + "rewrite_time": 185002, + "collector": [ + { + "name": "SimpleTopScoreDocCollector", + "reason": "search_top_hits", + "time": "2.206529000ms" + } + ] + } + ] + } + ] + } +} +-------------------------------------------------- +<1> Search results are returned, but were omitted here for brevity + +Even for a simple query, the response is relatively complicated. Let's break it down piece-by-piece before moving +to more complex examples. + +First, the overall structure of the profile response is as follows: + +[source,js] +-------------------------------------------------- +{ + "profile": { + "shards": [ + { + "id": "[htuC6YnSSSmKFq5UBt0YMA][test][0]", <1> + "searches": [ + { + "query": [...], <2> + "rewrite_time": 185002, <3> + "collector": [...] 
<4> + } + ] + } + ] + } +} +-------------------------------------------------- +<1> A profile is returned for each shard that participated in the response, and is identified +by a unique ID +<2> Each profile contains a section which holds details about the query execution +<3> Each profile has a single time representing the cumulative rewrite time +<4> Each profile also contains a section about the Lucene Collectors which run the search + +Because a search request may be executed against one or more shards in an index, and a search may cover +one or more indices, the top level element in the profile response is an array of `shard` objects. +Each shard object lists its `id` which uniquely identifies the shard. The ID's format is +`[nodeID][indexName][shardID]`. + +The profile itself may consist of one or more "searches", where a search is a query executed against the underlying +Lucene index. Most Search Requests submitted by the user will only execute a single `search` against the Lucene index. +But occasionally multiple searches will be executed, such as including a global aggregation (which needs to execute +a secondary "match_all" query for the global context). + +Inside each `search` object there will be two arrays of profiled information: +a `query` array and a `collector` array. In the future, more sections may be added, such as `suggest`, `highlight`, +`aggregations`, etc + +There will also be a `rewrite` metric showing the total time spent rewriting the query (in nanoseconds). + +=== `query` Section + +The `query` section contains detailed timing of the query tree executed by Lucene on a particular shard. +The overall structure of this query tree will resemble your original Elasticsearch query, but may be slightly +(or sometimes very) different. It will also use similar but not always identical naming. 
Using our previous +`term` query example, let's analyze the `query` section: + +[source,js] +-------------------------------------------------- +"query": [ + { + "query_type": "BooleanQuery", + "lucene": "message:search message:test", + "time": "15.52889800ms", + "breakdown": {...}, + "children": [ + { + "query_type": "TermQuery", + "lucene": "message:search", + "time": "4.938855000ms", + "breakdown": {...} + }, + { + "query_type": "TermQuery", + "lucene": "message:test", + "time": "0.5016660000ms", + "breakdown": {...} + } + ] + } +] +-------------------------------------------------- +<1> The breakdown timings are omitted for simplicity + +Based on the profile structure, we can see that our `match` query was rewritten by Lucene into a BooleanQuery with two +clauses (both holding a TermQuery). The `"query_type"` field displays the Lucene class name, and often aligns with +the equivalent name in Elasticsearch. The `"lucene"` field displays the Lucene explanation text for the query, and +is made available to help differentiate between parts of your query (e.g. both `"message:search"` and `"message:test"` +are TermQuery's and would appear identical otherwise). + +The `"time"` field shows that this query took ~15ms for the entire BooleanQuery to execute. The recorded time is inclusive +of all children. + +The `"breakdown"` field will give detailed stats about how the time was spent, we'll look at +that in a moment. Finally, the `"children"` array lists any sub-queries that may be present. Because we searched for two +values ("search test"), our BooleanQuery holds two children TermQueries. They have identical information (query_type, time, +breakdown, etc). Children are allowed to have their own children. 
+ +==== Timing Breakdown + +The `breakdown` component lists detailed timing statistics about low-level Lucene execution: + +[source,js] +-------------------------------------------------- +"breakdown": { + "score": 0, + "next_doc": 24495, + "match": 0, + "create_weight": 8488388, + "build_scorer": 7016015, + "advance": 0 + +} +-------------------------------------------------- + +Timings are listed in wall-clock nanoseconds and are not normalized at all. All caveats about the overall +`time` apply here. The intention of the breakdown is to give you a feel for A) what machinery in Lucene is +actually eating time, and B) the magnitude of differences in times between the various components. Like the overall time, +the breakdown is inclusive of all children times. + +The meaning of the stats is as follows: + +[float] +=== All parameters: + +[horizontal] + +`create_weight`:: + + A Query in Lucene must be capable of reuse across multiple IndexSearchers (think of it as the engine that + executes a search against a specific Lucene Index). This puts Lucene in a tricky spot, since many queries + need to accumulate temporary state/statistics associated with the index it is being used against, but the + Query contract mandates that it must be immutable. + + To get around this, Lucene asks each query to generate a Weight object which acts as a temporary context + object to hold state associated with this particular (IndexSearcher, Query) tuple. The `weight` metric + shows how long this process takes + +`build_scorer`:: + + This parameter shows how long it takes to build a Scorer for the query. A Scorer is the mechanism that + iterates over matching documents and generates a score per-document (e.g. how well does "foo" match the document?). + Note, this records the time required to generate the Scorer object, not actually score the documents. Some + queries have faster or slower initialization of the Scorer, depending on optimizations, complexity, etc. 
+ + This may also show timing associated with caching, if enabled and/or applicable for the query + +`next_doc`:: + + The Lucene method `next_doc` returns Doc ID of the next document matching the query. This statistic shows + the time it takes to determine which document is the next match, a process that varies considerably depending + on the nature of the query. Next_doc is a specialized form of advance() which is more convenient for many + queries in Lucene. It is equivalent to advance(docId() + 1) + +`advance`:: + + `advance` is the "lower level" version of next_doc: it serves the same purpose of finding the next matching + doc, but requires the calling query to perform extra tasks such as identifying and moving past skips, etc. + However, not all queries can use next_doc, so `advance` is also timed for those queries. + + Conjunctions (e.g. `must` clauses in a boolean) are typical consumers of `advance` + +`matches`:: + + Some queries, such as phrase queries, match documents using a "Two Phase" process. First, the document is + "approximately" matched, and if it matches approximately, it is checked a second time with a more rigorous + (and expensive) process. The second phase verification is what the `matches` statistic measures. + + For example, a phrase query first checks a document approximately by ensuring all terms in the phrase are + present in the doc. If all the terms are present, it then executes the second phase verification to ensure + the terms are in-order to form the phrase, which is relatively more expensive than just checking for presence + of the terms. + + Because this two-phase process is only used by a handful of queries, the `matches` statistic will often be zero + +`score`:: + + This records the time taken to score a particular document via its Scorer + +=== `collectors` Section + +The Collectors portion of the response shows high-level execution details. 
Lucene works by defining a "Collector" +which is responsible for coordinating the traversal, scoring and collection of matching documents. Collectors +are also how a single query can record aggregation results, execute unscoped "global" queries, execute post-query +filters, etc. + +Looking at the previous example: + +[source,js] +-------------------------------------------------- +"collector": [ + { + "name": "SimpleTopScoreDocCollector", + "reason": "search_top_hits", + "time": "2.206529000ms" + } +] +-------------------------------------------------- + +We see a single collector named `SimpleTopScoreDocCollector`. This is the default "scoring and sorting" Collector +used by Elasticsearch. The `"reason"` field attempts to give a plain English description of the class name. The +`"time"` is similar to the time in the Query tree: a wall-clock time inclusive of all children. Similarly, `children` lists +all sub-collectors. + +It should be noted that Collector times are **independent** from the Query times. They are calculated, combined +and normalized independently! Due to the nature of Lucene's execution, it is impossible to "merge" the times +from the Collectors into the Query section, so they are displayed in separate portions. + +For reference, the various collector reasons are: + +[horizontal] +`search_sorted`:: + + A collector that scores and sorts documents. This is the most common collector and will be seen in most + simple searches + +`search_count`:: + + A collector that only counts the number of documents that match the query, but does not fetch the source. + This is seen when `size: 0` or `search_type=count` is specified + +`search_terminate_after_count`:: + + A collector that terminates search execution after `n` matching documents have been found. This is seen + when the `terminate_after_count` query parameter has been specified + +`search_min_score`:: + + A collector that only returns matching documents that have a score greater than `n`. 
This is seen when + the top-level parameter `min_score` has been specified. + +`search_multi`:: + + A collector that wraps several other collectors. This is seen when combinations of search, aggregations, + global aggs and post_filters are combined in a single search. + +`search_timeout`:: + + A collector that halts execution after a specified period of time. This is seen when a `timeout` top-level + parameter has been specified. + +`aggregation`:: + + A collector that Elasticsearch uses to run aggregations against the query scope. A single `aggregation` + collector is used to collect documents for *all* aggregations, so you will see a list of aggregations + in the name, rather than a separate collector per aggregation. + +`global_aggregation`:: + + A collector that executes an aggregation against the global query scope, rather than the specified query. + Because the global scope is necessarily different from the executed query, it must execute its own + match_all query (which you will see added to the Query section) to collect your entire dataset + + + +=== `rewrite` Section + + All queries in Lucene undergo a "rewriting" process. A query (and its sub-queries) may be rewritten one or + more times, and the process continues until the query stops changing. This process allows Lucene to perform + optimizations, such as removing redundant clauses, replacing one query for a more efficient execution path, + etc. For example a Boolean -> Boolean -> TermQuery can be rewritten to a TermQuery, because all the Booleans + are unnecessary in this case. + + The rewriting process is complex and difficult to display, since queries can change drastically. Rather than + showing the intermediate results, the total rewrite time is simply displayed as a value (in nanoseconds). This + value is cumulative and contains the total time for all queries being rewritten. 
+ +=== A more complex example + +To demonstrate a slightly more complex query and the associated results, we can profile the following query: + +[source,js] +-------------------------------------------------- +GET /test/_search +{ + "profile": true, + "query": { + "term": { + "message": { + "value": "search" + } + } + }, + "aggs": { + "non_global_term": { + "terms": { + "field": "agg" + }, + "aggs": { + "second_term": { + "terms": { + "field": "sub_agg" + } + } + } + }, + "another_agg": { + "cardinality": { + "field": "aggB" + } + }, + "global_agg": { + "global": {}, + "aggs": { + "my_agg2": { + "terms": { + "field": "globalAgg" + } + } + } + } + }, + "post_filter": { + "term": { + "my_field": "foo" + } + } +} +-------------------------------------------------- + +This example has: + +- A query +- A scoped aggregation +- A global aggregation +- A post_filter + +And the response: + + +[source,js] +-------------------------------------------------- +{ + "profile": { + "shards": [ + { + "id": "[P6-vulHtQRWuD4YnubWb7A][test][0]", + "searches": [ + { + "query": [ + { + "query_type": "TermQuery", + "lucene": "my_field:foo", + "time": "0.4094560000ms", + "breakdown": { + "score": 0, + "next_doc": 0, + "match": 0, + "create_weight": 31584, + "build_scorer": 377872, + "advance": 0 + } + }, + { + "query_type": "TermQuery", + "lucene": "message:search", + "time": "0.3037020000ms", + "breakdown": { + "score": 0, + "next_doc": 5936, + "match": 0, + "create_weight": 185215, + "build_scorer": 112551, + "advance": 0 + } + } + ], + "rewrite_time": 7208, + "collector": [ + { + "name": "MultiCollector", + "reason": "search_multi", + "time": "1.378943000ms", + "children": [ + { + "name": "FilteredCollector", + "reason": "search_post_filter", + "time": "0.4036590000ms", + "children": [ + { + "name": "SimpleTopScoreDocCollector", + "reason": "search_top_hits", + "time": "0.006391000000ms" + } + ] + }, + { + "name": "BucketCollector: [[non_global_term, another_agg]]", + "reason": 
"aggregation", + "time": "0.9546020000ms" + } + ] + } + ] + }, + { + "query": [ + { + "query_type": "MatchAllDocsQuery", + "lucene": "*:*", + "time": "0.04829300000ms", + "breakdown": { + "score": 0, + "next_doc": 3672, + "match": 0, + "create_weight": 6311, + "build_scorer": 38310, + "advance": 0 + } + } + ], + "rewrite_time": 1067, + "collector": [ + { + "name": "GlobalAggregator: [global_agg]", + "reason": "aggregation_global", + "time": "0.1226310000ms" + } + ] + } + ] + } + ] + } +} +-------------------------------------------------- + +As you can see, the output is significantly verbose from before. All the major portions of the query are +represented: + +1. The first `TermQuery` (message:search) represents the main `term` query +2. The second `TermQuery` (my_field:foo) represents the `post_filter` query +3. There is a `MatchAllDocsQuery` (*:*) query which is being executed as a second, distinct search. This was +not part of the query specified by the user, but is auto-generated by the global aggregation to provide a global query scope + +The Collector tree is fairly straightforward, showing how a single MultiCollector wraps a FilteredCollector +to execute the post_filter (and in turn wraps the normal scoring SimpleCollector), a BucketCollector to run +all scoped aggregations. In the MatchAll search, there is a single GlobalAggregator to run the global aggregation. + +=== Performance Notes + +Like any profiler, the Profile API introduce a non-negligible overhead to query execution. The act of instrumenting +low-level method calls such as `advance` and `next_doc` can be fairly expensive, since these methods are called +in tight loops. Therefore, profiling should not be enabled in production settings by default, and should not +be compared against non-profiled query times. Profiling is just a diagnostic tool. + +There are also cases where special Lucene optimizations are disabled, since they are not amenable to profiling. 
This +could cause some queries to report larger relative times than their non-profiled counterparts, but in general should +not have a drastic effect compared to other components in the profiled query. + +=== Limitations + +- Profiling statistics are currently not available for suggestions, highlighting, `dfs_query_then_fetch` +- Detailed breakdown for aggregations is not currently available past the high-level overview provided +from the Collectors +- The Profiler is still highly experimental. The Profiler is instrumenting parts of Lucene that were +never designed to be exposed in this manner, and so all results should be viewed as a best effort to provide detailed +diagnostics. We hope to improve this over time. If you find obviously wrong numbers, strange query structures or +other bugs, please report them! + +=== Understanding MultiTermQuery output + +A special note needs to be made about the `MultiTermQuery` class of queries. This includes wildcards, regex and fuzzy +queries. These queries emit very verbose responses, and are not overly structured. + +Essentially, these queries rewrite themselves on a per-segment basis. If you imagine the wildcard query `b*`, it technically +can match any token that begins with the letter "b". It would be impossible to enumerate all possible combinations, +so Lucene rewrites the query in context of the segment being evaluated. E.g. one segment may contain the tokens +`[bar, baz]`, so the query rewrites to a BooleanQuery combination of "bar" and "baz". Another segment may only have the +token `[bakery]`, so query rewrites to a single TermQuery for "bakery". + +Due to this dynamic, per-segment rewriting, the clean tree structure becomes distorted and no longer follows a clean +"lineage" showing how one query rewrites into the next. At present time, all we can do is apologize, and suggest you +collapse the details for that query's children if it is too confusing. 
Luckily, all the timing statistics are correct, +just not the physical layout in the response, so it is sufficient to just analyze the top-level MultiTermQuery and +ignore it's children if you find the details too tricky to interpret. + +Hopefully this will be fixed in future iterations, but it is a tricky problem to solve and still in-progress :) \ No newline at end of file diff --git a/test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index 468b1877250..51fb0c905f4 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; @@ -60,6 +59,8 @@ import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.profile.Profiler; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -293,7 +294,7 @@ public class TestSearchContext extends SearchContext { } public void setSearcher(Engine.Searcher searcher) { - this.searcher = new ContextIndexSearcher(this, searcher); + this.searcher = new ContextIndexSearcher(searcher, indexService.cache().query(), 
indexShard.getQueryCachingPolicy()); } @Override @@ -660,8 +661,11 @@ public class TestSearchContext extends SearchContext { public void copyContextAndHeadersFrom(HasContextAndHeaders other) {} @Override - public Map, Collector> queryCollectors() {return queryCollectors;} + public Profilers getProfilers() { + return null; // no profiling + } @Override - public QueryCache getQueryCache() { return indexService.cache().query();} + public Map, Collector> queryCollectors() {return queryCollectors;} + } From b62f8f11d4d5dde7161b4f6da9c87563e128c0c2 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 17 Dec 2015 16:57:41 -0500 Subject: [PATCH 142/167] Address possible race condition in test This commit addresses a potential race condition in ClusterServiceIT#testClusterStateBatchedUpdates. The potential race condition is that the main test thread could be released to execute the final test assertions before the cluster state publication callbacks had completed thereby causing a situation where the test assertions could be executed before the final test state had been realized. 
--- .../org/elasticsearch/cluster/ClusterServiceIT.java | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index a7417b4362f..5a59341ef35 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -54,6 +54,7 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -762,6 +763,11 @@ public class ClusterServiceIT extends ESIntegTestCase { } } + int numberOfThreads = randomIntBetween(2, 8); + int tasksSubmittedPerThread = randomIntBetween(1, 1024); + int numberOfExecutors = Math.max(1, numberOfThreads / 4); + final Semaphore semaphore = new Semaphore(numberOfExecutors); + class TaskExecutor implements ClusterStateTaskExecutor { private AtomicInteger counter = new AtomicInteger(); private AtomicInteger batches = new AtomicInteger(); @@ -775,6 +781,7 @@ public class ClusterServiceIT extends ESIntegTestCase { if (randomBoolean()) { maybeUpdatedClusterState = ClusterState.builder(currentState).build(); batches.incrementAndGet(); + semaphore.acquire(); } return BatchResult.builder().successes(tasks).build(maybeUpdatedClusterState); } @@ -787,10 +794,9 @@ public class ClusterServiceIT extends ESIntegTestCase { @Override public void clusterStatePublished(ClusterState newClusterState) { published.incrementAndGet(); + semaphore.release(); } } - int numberOfThreads = randomIntBetween(2, 8); - int tasksSubmittedPerThread = randomIntBetween(1, 1024); ConcurrentMap counters = new ConcurrentHashMap<>(); CountDownLatch updateLatch = new 
CountDownLatch(numberOfThreads * tasksSubmittedPerThread); @@ -807,7 +813,6 @@ public class ClusterServiceIT extends ESIntegTestCase { } }; - int numberOfExecutors = Math.max(1, numberOfThreads / 4); List executors = new ArrayList<>(); for (int i = 0; i < numberOfExecutors; i++) { executors.add(new TaskExecutor()); @@ -853,6 +858,8 @@ public class ClusterServiceIT extends ESIntegTestCase { // wait until all the cluster state updates have been processed updateLatch.await(); + // and until all of the publication callbacks have completed + semaphore.acquire(numberOfExecutors); // assert the number of executed tasks is correct assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get()); From f5a992486e62157163873e1c4114ba3a6e1893d8 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Thu, 17 Dec 2015 17:02:07 -0500 Subject: [PATCH 143/167] [Docs] Add missing annotation, fix blockdef --- docs/reference/search/profile.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 3d701ae5198..366af6d57c2 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -13,7 +13,7 @@ The output from the Profile API is *very* verbose, especially for complicated qu many shards. Pretty-printing the response is recommended to help understand the output [NOTE] ---------------------- +======================================= The details provided by the Profile API directly expose Lucene class names and concepts, which means that complete interpretation of the results require fairly advanced knowledge of Lucene. This page attempts to give a crash-course in how Lucene executes queries so that you can use the Profile API to successfully @@ -23,7 +23,7 @@ to Lucene's documentation and, in places, the code. With that said, a complete understanding is often not required to fix a slow query. 
It is usually sufficient to see that a particular component of a query is slow, and not necessarily understand why the `advance` phase of that query is the cause, for example. ---------------------- +======================================= [float] === Usage @@ -185,7 +185,7 @@ The overall structure of this query tree will resemble your original Elasticsear "query_type": "BooleanQuery", "lucene": "message:search message:test", "time": "15.52889800ms", - "breakdown": {...}, + "breakdown": {...}, <1> "children": [ { "query_type": "TermQuery", From e128298c5da69ce8ccc1cd82d2b94cabd52df094 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Thu, 17 Dec 2015 17:27:28 -0500 Subject: [PATCH 144/167] [Docs] Formatting tweaks --- docs/reference/search/profile.asciidoc | 35 +++++++++++++++----------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 366af6d57c2..b62d83eee6b 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -247,14 +247,14 @@ The meaning of the stats are as follows: === All parameters: [horizontal] - `create_weight`:: A Query in Lucene must be capable of reuse across multiple IndexSearchers (think of it as the engine that executes a search against a specific Lucene Index). This puts Lucene in a tricky spot, since many queries need to accumulate temporary state/statistics associated with the index it is being used against, but the Query contract mandates that it must be immutable. - + {empty} + + {empty} + To get around this, Lucene asks each query to generate a Weight object which acts as a temporary context object to hold state associated with this particular (IndexSearcher, Query) tuple. The `weight` metric shows how long this process takes @@ -265,7 +265,8 @@ The meaning of the stats are as follows: iterates over matching documents generates a score per-document (e.g. how well does "foo" match the document?). 
Note, this records the time required to generate the Scorer object, not actuall score the documents. Some queries have faster or slower initialization of the Scorer, depending on optimizations, complexity, etc. - + {empty} + + {empty} + This may also showing timing associated with caching, if enabled and/or applicable for the query `next_doc`:: @@ -280,7 +281,8 @@ The meaning of the stats are as follows: `advance` is the "lower level" version of next_doc: it serves the same purpose of finding the next matching doc, but requires the calling query to perform extra tasks such as identifying and moving past skips, etc. However, not all queries can use next_doc, so `advance` is also timed for those queries. - + {empty} + + {empty} + Conjunctions (e.g. `must` clauses in a boolean) are typical consumers of `advance` `matches`:: @@ -288,18 +290,21 @@ The meaning of the stats are as follows: Some queries, such as phrase queries, match documents using a "Two Phase" process. First, the document is "approximately" matched, and if it matches approximately, it is checked a second time with a more rigorous (and expensive) process. The second phase verification is what the `matches` statistic measures. - + {empty} + + {empty} + For example, a phrase query first checks a document approximately by ensuring all terms in the phrase are present in the doc. If all the terms are present, it then executes the second phase verification to ensure the terms are in-order to form the phrase, which is relatively more expensive than just checking for presence of the terms. - + {empty} + + {empty} + Because this two-phase process is only used by a handful of queries, the `metric` statistic will often be zero `score`:: This records the time taken to score a particular document via it's Scorer + === `collectors` Section The Collectors portion of the response shows high-level execution details. 
Lucene works by defining a "Collector" @@ -378,15 +383,15 @@ For reference, the various collector reason's are: === `rewrite` Section - All queries in Lucene undergo a "rewriting" process. A query (and its sub-queries) may be rewritten one or - more times, and the process continues until the query stops changing. This process allows Lucene to perform - optimizations, such as removing redundant clauses, replacing one query for a more efficient execution path, - etc. For example a Boolean -> Boolean -> TermQuery can be rewritten to a TermQuery, because all the Booleans - are unnecessary in this case. +All queries in Lucene undergo a "rewriting" process. A query (and its sub-queries) may be rewritten one or +more times, and the process continues until the query stops changing. This process allows Lucene to perform +optimizations, such as removing redundant clauses, replacing one query for a more efficient execution path, +etc. For example a Boolean -> Boolean -> TermQuery can be rewritten to a TermQuery, because all the Booleans +are unnecessary in this case. - The rewriting process is complex and difficult to display, since queries can change drastically. Rather than - showing the intermediate results, the total rewrite time is simply displayed as a value (in nanoseconds). This - value is cumulative and contains the total time for all queries being rewritten. +The rewriting process is complex and difficult to display, since queries can change drastically. Rather than +showing the intermediate results, the total rewrite time is simply displayed as a value (in nanoseconds). This +value is cumulative and contains the total time for all queries being rewritten. === A more complex example @@ -553,7 +558,7 @@ represented: 1. The first `TermQuery` (message:search) represents the main `term` query 2. The second `TermQuery` (my_field:foo) represents the `post_filter` query -3. There is a `MatchAllDocsQuery` (*:*) query which is being executed as a second, distinct search. 
This was +3. There is a `MatchAllDocsQuery` (\*:*) query which is being executed as a second, distinct search. This was not part of the query specified by the user, but is auto-generated by the global aggregation to provide a global query scope The Collector tree is fairly straightforward, showing how a single MultiCollector wraps a FilteredCollector From 6a99796b02383862825687a4fb57080ddd92f616 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 17 Dec 2015 16:57:39 -0800 Subject: [PATCH 145/167] Build: Move test framework under a "test" top level dir This allows adding more test projects, eg integ test fixtures that will be coming soon. --- build.gradle | 6 ++--- .../elasticsearch/gradle/BuildPlugin.groovy | 3 ++- .../gradle/plugin/PluginBuildPlugin.groovy | 2 +- .../test/StandaloneTestBasePlugin.groovy | 2 +- core/build.gradle | 2 +- settings.gradle | 2 +- test/build.gradle | 23 +++++++++++++++++++ .../framework}/build.gradle | 1 - .../bootstrap/BootstrapForTesting.java | 0 .../cache/recycler/MockPageCacheRecycler.java | 0 .../MockInternalClusterInfoService.java | 0 .../cluster/routing/TestShardRouting.java | 0 .../common/cli/CliToolTestCase.java | 0 .../common/io/FileTestUtils.java | 0 .../common/io/PathUtilsForTesting.java | 0 .../common/util/MockBigArrays.java | 0 .../elasticsearch/index/MapperTestUtils.java | 0 .../index/MockEngineFactoryPlugin.java | 0 .../java/org/elasticsearch/node/MockNode.java | 0 .../elasticsearch/node/NodeMocksPlugin.java | 0 .../percolator/PercolatorTestUtil.java | 0 .../elasticsearch/plugins/PluginTestUtil.java | 0 .../script/MockScriptEngine.java | 0 .../search/MockSearchService.java | 0 .../bucket/AbstractTermsTestCase.java | 0 ...NativeSignificanceScoreScriptNoParams.java | 0 ...tiveSignificanceScoreScriptWithParams.java | 0 .../bucket/script/TestScript.java | 0 .../metrics/AbstractNumericTestCase.java | 0 .../elasticsearch/test/BackgroundIndexer.java | 0 .../test/CompositeTestCluster.java | 0 
.../elasticsearch/test/CorruptionUtils.java | 0 .../elasticsearch/test/DummyShardLock.java | 0 .../test/ESAllocationTestCase.java | 0 .../test/ESBackcompatTestCase.java | 0 .../elasticsearch/test/ESIntegTestCase.java | 0 .../test/ESSingleNodeTestCase.java | 0 .../org/elasticsearch/test/ESTestCase.java | 0 .../test/ESTokenStreamTestCase.java | 0 .../org/elasticsearch/test/ExternalNode.java | 0 .../test/ExternalTestCluster.java | 0 .../test/FieldMaskingReader.java | 0 .../test/IndexSettingsModule.java | 0 .../test/InternalTestCluster.java | 0 .../test/MockIndexEventListener.java | 0 .../test/NodeConfigurationSource.java | 0 .../org/elasticsearch/test/StreamsUtils.java | 0 .../org/elasticsearch/test/TestCluster.java | 0 .../elasticsearch/test/TestSearchContext.java | 0 .../org/elasticsearch/test/VersionUtils.java | 0 .../elasticsearch/test/XContentTestUtils.java | 0 .../test/client/RandomizingClient.java | 0 .../test/cluster/NoopClusterService.java | 0 .../test/cluster/TestClusterService.java | 0 .../ClusterDiscoveryConfiguration.java | 0 .../BlockClusterStateProcessing.java | 0 .../IntermittentLongGCDisruption.java | 0 .../test/disruption/LongGCDisruption.java | 0 .../disruption/NetworkDelaysPartition.java | 0 .../NetworkDisconnectPartition.java | 0 .../test/disruption/NetworkPartition.java | 0 .../test/disruption/NetworkPartitionIT.java | 0 .../NetworkUnresponsivePartition.java | 0 .../test/disruption/NoOpDisruptionScheme.java | 0 .../disruption/ServiceDisruptionScheme.java | 0 .../test/disruption/SingleNodeDisruption.java | 0 .../SlowClusterStateProcessing.java | 0 .../test/engine/AssertingSearcher.java | 0 .../test/engine/MockEngineFactory.java | 0 .../test/engine/MockEngineSupport.java | 0 .../test/engine/MockInternalEngine.java | 0 .../test/engine/MockShadowEngine.java | 0 .../engine/ThrowingLeafReaderWrapper.java | 0 .../test/gateway/NoopGatewayAllocator.java | 0 .../test/hamcrest/CollectionAssertions.java | 0 .../test/hamcrest/CollectionMatchers.java | 0 
.../hamcrest/ElasticsearchAssertions.java | 0 .../test/hamcrest/ElasticsearchMatchers.java | 0 .../test/hamcrest/RegexMatcher.java | 0 .../test/junit/annotations/Network.java | 0 .../test/junit/annotations/TestLogging.java | 0 .../test/junit/listeners/LoggingListener.java | 0 .../junit/listeners/ReproduceInfoPrinter.java | 0 .../junit/rule/RepeatOnExceptionRule.java | 0 .../rest/BlacklistedPathPatternMatcher.java | 0 .../test/rest/ESRestTestCase.java | 0 .../test/rest/FakeRestRequest.java | 0 .../test/rest/RestTestCandidate.java | 0 .../test/rest/RestTestExecutionContext.java | 0 .../org/elasticsearch/test/rest/Stash.java | 0 .../test/rest/client/RestClient.java | 0 .../test/rest/client/RestException.java | 0 .../test/rest/client/RestPath.java | 0 .../test/rest/client/RestResponse.java | 0 .../client/http/HttpDeleteWithEntity.java | 0 .../rest/client/http/HttpGetWithEntity.java | 0 .../rest/client/http/HttpRequestBuilder.java | 0 .../test/rest/client/http/HttpResponse.java | 0 .../test/rest/json/JsonPath.java | 0 .../test/rest/parser/DoSectionParser.java | 0 .../rest/parser/GreaterThanEqualToParser.java | 0 .../test/rest/parser/GreaterThanParser.java | 0 .../test/rest/parser/IsFalseParser.java | 0 .../test/rest/parser/IsTrueParser.java | 0 .../test/rest/parser/LengthParser.java | 0 .../rest/parser/LessThanOrEqualToParser.java | 0 .../test/rest/parser/LessThanParser.java | 0 .../test/rest/parser/MatchParser.java | 0 .../rest/parser/RestTestFragmentParser.java | 0 .../rest/parser/RestTestParseException.java | 0 .../rest/parser/RestTestSectionParser.java | 0 .../parser/RestTestSuiteParseContext.java | 0 .../test/rest/parser/RestTestSuiteParser.java | 0 .../test/rest/parser/SetSectionParser.java | 0 .../test/rest/parser/SetupSectionParser.java | 0 .../test/rest/parser/SkipSectionParser.java | 0 .../test/rest/section/ApiCallSection.java | 0 .../test/rest/section/Assertion.java | 0 .../test/rest/section/DoSection.java | 0 .../test/rest/section/ExecutableSection.java | 0 
.../rest/section/GreaterThanAssertion.java | 0 .../section/GreaterThanEqualToAssertion.java | 0 .../test/rest/section/IsFalseAssertion.java | 0 .../test/rest/section/IsTrueAssertion.java | 0 .../test/rest/section/LengthAssertion.java | 0 .../test/rest/section/LessThanAssertion.java | 0 .../section/LessThanOrEqualToAssertion.java | 0 .../test/rest/section/MatchAssertion.java | 0 .../test/rest/section/RestTestSuite.java | 0 .../test/rest/section/SetSection.java | 0 .../test/rest/section/SetupSection.java | 0 .../test/rest/section/SkipSection.java | 0 .../test/rest/section/TestSection.java | 0 .../elasticsearch/test/rest/spec/RestApi.java | 0 .../test/rest/spec/RestApiParser.java | 0 .../test/rest/spec/RestSpec.java | 0 .../test/rest/support/Features.java | 0 .../test/rest/support/FileUtils.java | 0 .../test/store/MockFSDirectoryService.java | 0 .../test/store/MockFSIndexStore.java | 0 .../transport/AssertingLocalTransport.java | 0 .../test/transport/CapturingTransport.java | 0 .../test/transport/MockTransportService.java | 0 .../src/main/resources/log4j.properties | 0 .../BlacklistedPathPatternMatcherTests.java | 0 .../rest/test/AbstractParserTestCase.java | 0 .../test/rest/test/AssertionParsersTests.java | 0 .../test/rest/test/DoSectionParserTests.java | 0 .../test/rest/test/FileUtilsTests.java | 0 .../test/rest/test/JsonPathTests.java | 0 .../rest/test/RestApiParserFailingTests.java | 0 .../test/rest/test/RestApiParserTests.java | 0 .../test/rest/test/RestTestParserTests.java | 0 .../test/rest/test/SetSectionParserTests.java | 0 .../rest/test/SetupSectionParserTests.java | 0 .../rest/test/SkipSectionParserTests.java | 0 .../rest/test/TestSectionParserTests.java | 0 .../test/test/InternalTestClusterTests.java | 0 .../test/test/LoggingListenerTests.java | 0 .../test/test/SuiteScopeClusterIT.java | 0 .../test/test/TestScopeClusterIT.java | 0 .../test/test/VersionUtilsTests.java | 0 .../rest-api-spec/test/suite1/10_basic.yaml | 0 .../test/suite1/20_another_test.yaml | 
0 .../rest-api-spec/test/suite2/10_basic.yaml | 0 .../rest-api-spec/test/suite2/15_test2.yaml | 0 166 files changed, 32 insertions(+), 9 deletions(-) create mode 100644 test/build.gradle rename {test-framework => test/framework}/build.gradle (98%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/common/io/FileTestUtils.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/common/util/MockBigArrays.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/index/MapperTestUtils.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/node/MockNode.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/script/MockScriptEngine.java (100%) rename {test-framework => 
test/framework}/src/main/java/org/elasticsearch/search/MockSearchService.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/BackgroundIndexer.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/CompositeTestCluster.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/CorruptionUtils.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/DummyShardLock.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/ESIntegTestCase.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/ESTestCase.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/ExternalNode.java 
(100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/ExternalTestCluster.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/FieldMaskingReader.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/IndexSettingsModule.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/InternalTestCluster.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/MockIndexEventListener.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/StreamsUtils.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/TestCluster.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/TestSearchContext.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/VersionUtils.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/XContentTestUtils.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/client/RandomizingClient.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java (100%) rename {test-framework => 
test/framework}/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java (100%) rename {test-framework => 
test/framework}/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/junit/annotations/Network.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/Stash.java (100%) rename 
{test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/client/RestClient.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/client/RestException.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/client/RestPath.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java (100%) rename {test-framework => 
test/framework}/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/Assertion.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/DoSection.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java (100%) rename {test-framework => 
test/framework}/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/SetSection.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/section/TestSection.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/support/Features.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java (100%) rename {test-framework => 
test/framework}/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java (100%) rename {test-framework => test/framework}/src/main/java/org/elasticsearch/test/transport/MockTransportService.java (100%) rename {test-framework => test/framework}/src/main/resources/log4j.properties (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcherTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTestCase.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java (100%) rename {test-framework => 
test/framework}/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java (100%) rename {test-framework => test/framework}/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java (100%) rename {test-framework => test/framework}/src/test/resources/rest-api-spec/test/suite1/10_basic.yaml (100%) rename {test-framework => test/framework}/src/test/resources/rest-api-spec/test/suite1/20_another_test.yaml (100%) rename {test-framework => test/framework}/src/test/resources/rest-api-spec/test/suite2/10_basic.yaml (100%) rename {test-framework => test/framework}/src/test/resources/rest-api-spec/test/suite2/15_test2.yaml (100%) diff --git a/build.gradle b/build.gradle index 8a95fa90925..ab40587d5a1 100644 --- a/build.gradle +++ b/build.gradle @@ -109,7 +109,7 @@ subprojects { ext.projectSubstitutions = [ "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec', "org.elasticsearch:elasticsearch:${version}": ':core', - "org.elasticsearch:test-framework:${version}": ':test-framework', + "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip', "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip', "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar', @@ -141,8 +141,8 @@ subprojects { // the 
dependency is added. gradle.projectsEvaluated { allprojects { - if (project.path == ':test-framework') { - // :test-framework:test cannot run before and after :core:test + if (project.path == ':test:framework') { + // :test:framework:test cannot run before and after :core:test return } configurations.all { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index c4d0ced6b5c..10f479ee100 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -202,7 +202,7 @@ class BuildPlugin implements Plugin { // force all dependencies added directly to compile/testCompile to be non-transitive, except for ES itself Closure disableTransitiveDeps = { ModuleDependency dep -> - if (!(dep instanceof ProjectDependency) && dep.getGroup() != 'org.elasticsearch') { + if (!(dep instanceof ProjectDependency) && dep.group.startsWith('org.elasticsearch') == false) { dep.transitive = false // also create a configuration just for this dependency version, so that later @@ -302,6 +302,7 @@ class BuildPlugin implements Plugin { options.compilerArgs << '-profile' << project.compactProfile } options.encoding = 'UTF-8' + //options.incremental = true } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 0d936ab0e15..9a000ab3296 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -60,7 +60,7 @@ public class PluginBuildPlugin extends BuildPlugin { private static void configureDependencies(Project project) { project.dependencies { provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}" - testCompile 
"org.elasticsearch:test-framework:${project.versions.elasticsearch}" + testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}" // we "upgrade" these optional deps to provided for plugins, since they will run // with a full elasticsearch server that includes optional deps provided "com.spatial4j:spatial4j:${project.versions.spatial4j}" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy index f317254cd45..3063853c871 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy @@ -42,7 +42,7 @@ public class StandaloneTestBasePlugin implements Plugin { // only setup tests to build project.sourceSets.create('test') - project.dependencies.add('testCompile', "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}") + project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}") project.eclipse.classpath.sourceSets = [project.sourceSets.test] project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime] diff --git a/core/build.gradle b/core/build.gradle index f4eb2c0aebd..61cdd12a194 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -82,7 +82,7 @@ dependencies { compile "net.java.dev.jna:jna:${versions.jna}", optional if (isEclipse == false || project.path == ":core-tests") { - testCompile("org.elasticsearch:test-framework:${version}") { + testCompile("org.elasticsearch.test:framework:${version}") { // tests use the locally compiled version of core exclude group: 'org.elasticsearch', module: 'elasticsearch' } diff --git a/settings.gradle b/settings.gradle index e9fb0a043aa..3526c0429ef 100644 --- a/settings.gradle +++ b/settings.gradle @@ -8,7 +8,7 @@ List projects = [ 'distribution:tar', 
'distribution:deb', 'distribution:rpm', - 'test-framework', + 'test:framework', 'modules:lang-expression', 'modules:lang-groovy', 'modules:lang-mustache', diff --git a/test/build.gradle b/test/build.gradle new file mode 100644 index 00000000000..037bb8d508e --- /dev/null +++ b/test/build.gradle @@ -0,0 +1,23 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +subprojects { + group = 'org.elasticsearch.test' + apply plugin: 'com.bmuschko.nexus' +} diff --git a/test-framework/build.gradle b/test/framework/build.gradle similarity index 98% rename from test-framework/build.gradle rename to test/framework/build.gradle index 6930abb3d23..a2c568f1d7f 100644 --- a/test-framework/build.gradle +++ b/test/framework/build.gradle @@ -19,7 +19,6 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks apply plugin: 'elasticsearch.build' -apply plugin: 'com.bmuschko.nexus' dependencies { compile "org.elasticsearch:elasticsearch:${version}" diff --git a/test-framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java rename to test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java diff --git a/test-framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java b/test/framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java rename to test/framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java diff --git a/test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java rename to test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java diff --git a/test-framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java 
similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java rename to test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java diff --git a/test-framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java rename to test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java diff --git a/test-framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java b/test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java rename to test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java diff --git a/test-framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java b/test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java rename to test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java diff --git a/test-framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java rename to test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java diff --git a/test-framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java rename to 
test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java diff --git a/test-framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java b/test/framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java rename to test/framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java diff --git a/test-framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/node/MockNode.java rename to test/framework/src/main/java/org/elasticsearch/node/MockNode.java diff --git a/test-framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java b/test/framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java rename to test/framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java diff --git a/test-framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java b/test/framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java rename to test/framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java diff --git a/test-framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java rename to test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java diff --git a/test-framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java 
b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java rename to test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java diff --git a/test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java rename to test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java diff --git a/test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java rename to test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java diff --git a/test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java rename to test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java diff --git a/test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java similarity index 100% rename from 
test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java rename to test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java diff --git a/test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java rename to test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java diff --git a/test-framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java rename to test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java rename to test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java rename to test/framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java 
b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java rename to test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/DummyShardLock.java b/test/framework/src/main/java/org/elasticsearch/test/DummyShardLock.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/DummyShardLock.java rename to test/framework/src/main/java/org/elasticsearch/test/DummyShardLock.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java rename to test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java rename to test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java rename to test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java rename to 
test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java rename to test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java rename to test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/ExternalNode.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/ExternalNode.java rename to test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java rename to test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java b/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java rename to test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java 
b/test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java rename to test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java rename to test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java rename to test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java rename to test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/StreamsUtils.java b/test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/StreamsUtils.java rename to test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java similarity index 100% rename from 
test-framework/src/main/java/org/elasticsearch/test/TestCluster.java rename to test/framework/src/main/java/org/elasticsearch/test/TestCluster.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java rename to test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/VersionUtils.java rename to test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java rename to test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java rename to test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java b/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java rename to test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java diff --git 
a/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java b/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java rename to test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java rename to test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java rename to test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java rename to test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java similarity index 100% rename from 
test-framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java rename to test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java 
b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java rename to test/framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java rename to test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java rename to 
test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java b/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java rename to test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java rename to test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java rename to test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java rename to test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java rename to 
test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java b/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java rename to test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java rename to test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java rename to test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java rename to test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java similarity index 100% rename from 
test-framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java rename to test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java rename to test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java rename to test/framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java rename to test/framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java rename to test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java similarity index 100% rename 
from test-framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java rename to test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java rename to test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java b/test/framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java rename to test/framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java b/test/framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java 
b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/Stash.java b/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/Stash.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java similarity index 100% rename from 
test-framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java 
b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java diff --git 
a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java diff 
--git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java rename to 
test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java similarity index 100% rename from 
test-framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java similarity index 100% rename from 
test-framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java 
b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java diff --git 
a/test-framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java 
b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/support/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/support/Features.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/support/Features.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/support/Features.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java b/test/framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java rename to test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java rename to test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java 
b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java rename to test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java rename to test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java diff --git a/test-framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java similarity index 100% rename from test-framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java rename to test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java diff --git a/test-framework/src/main/resources/log4j.properties b/test/framework/src/main/resources/log4j.properties similarity index 100% rename from test-framework/src/main/resources/log4j.properties rename to test/framework/src/main/resources/log4j.properties diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcherTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcherTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcherTests.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcherTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTestCase.java 
b/test/framework/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTestCase.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTestCase.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTestCase.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java diff --git 
a/test-framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java rename to 
test/framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java rename to test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java rename to test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java b/test/framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java similarity index 100% rename from 
test-framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java rename to test/framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java b/test/framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java rename to test/framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java diff --git a/test-framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java similarity index 100% rename from test-framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java rename to test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java diff --git a/test-framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yaml b/test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yaml similarity index 100% rename from test-framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yaml rename to test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yaml diff --git a/test-framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yaml b/test/framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yaml similarity index 100% rename from test-framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yaml rename to test/framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yaml diff --git a/test-framework/src/test/resources/rest-api-spec/test/suite2/10_basic.yaml b/test/framework/src/test/resources/rest-api-spec/test/suite2/10_basic.yaml similarity index 100% rename from test-framework/src/test/resources/rest-api-spec/test/suite2/10_basic.yaml rename to 
test/framework/src/test/resources/rest-api-spec/test/suite2/10_basic.yaml diff --git a/test-framework/src/test/resources/rest-api-spec/test/suite2/15_test2.yaml b/test/framework/src/test/resources/rest-api-spec/test/suite2/15_test2.yaml similarity index 100% rename from test-framework/src/test/resources/rest-api-spec/test/suite2/15_test2.yaml rename to test/framework/src/test/resources/rest-api-spec/test/suite2/15_test2.yaml From 801de8f2ba296db62afae25ff201de92cb6ddfd7 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Fri, 18 Dec 2015 06:48:52 +0100 Subject: [PATCH 146/167] Fix usage of latch in RetryTests --- .../org/elasticsearch/action/bulk/RetryTests.java | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 5b9ab898b0e..4d73f932d2f 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -31,6 +31,7 @@ import org.junit.After; import org.junit.Before; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; import static org.hamcrest.Matchers.*; @@ -91,7 +92,6 @@ public class RetryTests extends ESTestCase { assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions())); } - @AwaitsFix(bugUrl = "spuriously fails on Jenkins. Investigation ongoing.") public void testAsyncRetryBacksOff() throws Exception { BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL); AssertingListener listener = new AssertingListener(); @@ -108,7 +108,6 @@ public class RetryTests extends ESTestCase { listener.assertOnFailureNeverCalled(); } - @AwaitsFix(bugUrl = "spuriously fails on Jenkins. 
Investigation ongoing.") public void testAsyncRetryFailsAfterBacksOff() throws Exception { BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1); AssertingListener listener = new AssertingListener(); @@ -128,7 +127,7 @@ public class RetryTests extends ESTestCase { private static class AssertingListener implements ActionListener { private final CountDownLatch latch; - private volatile int countOnResponseCalled = 0; + private final AtomicInteger countOnResponseCalled = new AtomicInteger(); private volatile Throwable lastFailure; private volatile BulkResponse response; @@ -142,19 +141,19 @@ public class RetryTests extends ESTestCase { @Override public void onResponse(BulkResponse bulkItemResponses) { - latch.countDown(); this.response = bulkItemResponses; - countOnResponseCalled++; + countOnResponseCalled.incrementAndGet(); + latch.countDown(); } @Override public void onFailure(Throwable e) { - latch.countDown(); this.lastFailure = e; + latch.countDown(); } public void assertOnResponseCalled() { - assertThat(countOnResponseCalled, equalTo(1)); + assertThat(countOnResponseCalled.get(), equalTo(1)); } public void assertResponseWithNumberOfItems(int numItems) { From fbff877ec327df119179b016ac13f95fd9125e5a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 17 Dec 2015 22:31:40 -0800 Subject: [PATCH 147/167] Fix thirdpartyaudit to ignore all elasticsearch packages --- .../elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index 83fe9115083..814f9c1e60b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -98,7 +98,7 @@ public class ThirdPartyAuditTask extends 
DefaultTask { // we only want third party dependencies. FileCollection jars = project.configurations.testCompile.fileCollection({ dependency -> - dependency.group != "org.elasticsearch" + dependency.group.startsWith("org.elasticsearch") == false }) // we don't want provided dependencies, which we have already scanned. e.g. don't From 2093ea50d5e4b0b9125391f21ede374fd3b964db Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 17 Dec 2015 23:15:18 -0800 Subject: [PATCH 148/167] Docs: Fix nodeSettings example for integ tests to use correct Settings.Builder reference --- docs/reference/testing/testing-framework.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc index 9c0e5f4f10d..e0b27733441 100644 --- a/docs/reference/testing/testing-framework.asciidoc +++ b/docs/reference/testing/testing-framework.asciidoc @@ -116,7 +116,7 @@ public class Mytests extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { - return settingsBuilder().put(super.nodeSettings(nodeOrdinal)) + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) .put("node.mode", "network") .build(); } From 711f949ca2b6c2b6879a1b3ff6cd03d9958b4ae6 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 18 Dec 2015 09:15:01 +0100 Subject: [PATCH 149/167] Convert `transport.tcp.compress` setting --- .../org/elasticsearch/common/settings/ClusterSettings.java | 3 ++- .../src/main/java/org/elasticsearch/transport/Transport.java | 5 +---- .../org/elasticsearch/transport/netty/NettyTransport.java | 2 +- .../java/org/elasticsearch/test/InternalTestCluster.java | 2 +- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index ac976268a0c..ac9631d29b1 100644 --- 
a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -136,5 +136,6 @@ public final class ClusterSettings extends AbstractScopedSettings { InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, - Transport.TRANSPORT_PROFILES_SETTING))); + Transport.TRANSPORT_PROFILES_SETTING, + Transport.TRANSPORT_TCP_COMPRESS))); } diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java index 16270234494..78b07e3aae3 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transport.java +++ b/core/src/main/java/org/elasticsearch/transport/Transport.java @@ -37,10 +37,7 @@ public interface Transport extends LifecycleComponent { Setting TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER); - - public static class TransportSettings { - public static final String TRANSPORT_TCP_COMPRESS = "transport.tcp.compress"; - } + Setting TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, false, Setting.Scope.CLUSTER); void transportServiceAdapter(TransportServiceAdapter service); diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index 37f1dc4fa0b..6a6a6c38011 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -225,7 +225,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem this.connectTimeout = this.settings.getAsTime("transport.netty.connect_timeout", settings.getAsTime("transport.tcp.connect_timeout", 
settings.getAsTime(TCP_CONNECT_TIMEOUT, TCP_DEFAULT_CONNECT_TIMEOUT))); this.maxCumulationBufferCapacity = this.settings.getAsBytesSize("transport.netty.max_cumulation_buffer_capacity", null); this.maxCompositeBufferComponents = this.settings.getAsInt("transport.netty.max_composite_buffer_components", -1); - this.compress = settings.getAsBoolean(TransportSettings.TRANSPORT_TCP_COMPRESS, false); + this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings); this.connectionsPerNodeRecovery = this.settings.getAsInt("transport.netty.connections_per_node.recovery", settings.getAsInt(CONNECTIONS_PER_NODE_RECOVERY, 2)); this.connectionsPerNodeBulk = this.settings.getAsInt("transport.netty.connections_per_node.bulk", settings.getAsInt(CONNECTIONS_PER_NODE_BULK, 3)); diff --git a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index d1749399146..10d4482a24c 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -372,7 +372,7 @@ public final class InternalTestCluster extends TestCluster { Builder builder = Settings.settingsBuilder() .put(SETTING_CLUSTER_NODE_SEED, seed); if (isLocalTransportConfigured() == false) { - builder.put(Transport.TransportSettings.TRANSPORT_TCP_COMPRESS, rarely(random)); + builder.put(Transport.TRANSPORT_TCP_COMPRESS.getKey(), rarely(random)); } if (random.nextBoolean()) { builder.put("cache.recycler.page.type", RandomPicks.randomFrom(random, PageCacheRecycler.Type.values())); From 5b991b9d5e90258422964023d49ef2315dc201bd Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 18 Dec 2015 09:43:56 +0100 Subject: [PATCH 150/167] Check for tragic event on all kinds of exceptions not only ACE and IOException It's important to close not matter what exception caused a tragic event. 
Today we only check on IOException and AlreadyClosedExceptions. The test had a bug and threw an IAE instead causing the translog not to be closed. --- .../index/translog/Translog.java | 5 +- .../index/translog/TranslogTests.java | 59 ++++++++++++++++--- 2 files changed, 53 insertions(+), 11 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index a105f652728..fd5c64f96ac 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -424,6 +424,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC closeOnTragicEvent(ex); throw ex; } catch (Throwable e) { + closeOnTragicEvent(e); throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e); } finally { Releasables.close(out.bytes()); @@ -500,7 +501,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC if (closed.get() == false) { current.sync(); } - } catch (AlreadyClosedException | IOException ex) { + } catch (Throwable ex) { closeOnTragicEvent(ex); throw ex; } @@ -533,7 +534,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC ensureOpen(); return current.syncUpTo(location.translogLocation + location.size); } - } catch (AlreadyClosedException | IOException ex) { + } catch (Throwable ex) { closeOnTragicEvent(ex); throw ex; } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 51de00f74a6..8b3294c15b8 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -63,7 +63,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import 
java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Predicate; import static org.hamcrest.Matchers.*; @@ -1387,6 +1386,35 @@ public class TranslogTests extends ESTestCase { } } + public void testTragicEventCanBeAnyException() throws IOException { + Path tempDir = createTempDir(); + final AtomicBoolean fail = new AtomicBoolean(); + TranslogConfig config = getTranslogConfig(tempDir); + assumeFalse("this won't work if we sync on any op",config.isSyncOnEachOperation()); + Translog translog = getFailableTranslog(fail, config, false, true); + LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly + translog.add(new Translog.Index("test", "1", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + fail.set(true); + try { + Translog.Location location = translog.add(new Translog.Index("test", "2", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + if (config.getType() == TranslogWriter.Type.BUFFERED) { // the buffered case will fail on the add if we exceed the buffer or will fail on the flush once we sync + if (randomBoolean()) { + translog.ensureSynced(location); + } else { + translog.sync(); + } + } + //TODO once we have a mock FS that can simulate we can also fail on plain sync + fail("WTF"); + } catch (UnknownException ex) { + // w00t + } catch (TranslogException ex) { + assertTrue(ex.getCause() instanceof UnknownException); + } + assertFalse(translog.isOpen()); + assertTrue(translog.getTragicException() instanceof UnknownException); + } + public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException, InterruptedException { Path tempDir = createTempDir(); final AtomicBoolean fail = new AtomicBoolean(false); @@ -1432,9 +1460,9 @@ public class TranslogTests extends ESTestCase { } boolean atLeastOneFailed = false; for (Throwable ex : threadExceptions) { + assertTrue(ex.toString(), ex 
instanceof IOException || ex instanceof AlreadyClosedException); if (ex != null) { atLeastOneFailed = true; - break; } } if (atLeastOneFailed == false) { @@ -1477,8 +1505,11 @@ public class TranslogTests extends ESTestCase { } } } - private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config) throws IOException { + return getFailableTranslog(fail, config, randomBoolean(), false); + } + + private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config, final boolean paritalWrites, final boolean throwUnknownException) throws IOException { return new Translog(config) { @Override TranslogWriter.ChannelFactory getChannelFactory() { @@ -1488,7 +1519,7 @@ public class TranslogTests extends ESTestCase { @Override public FileChannel open(Path file) throws IOException { FileChannel channel = factory.open(file); - return new ThrowingFileChannel(fail, randomBoolean(), channel); + return new ThrowingFileChannel(fail, paritalWrites, throwUnknownException, channel); } }; } @@ -1498,11 +1529,13 @@ public class TranslogTests extends ESTestCase { public static class ThrowingFileChannel extends FilterFileChannel { private final AtomicBoolean fail; private final boolean partialWrite; + private final boolean throwUnknownException; - public ThrowingFileChannel(AtomicBoolean fail, boolean partialWrite, FileChannel delegate) { + public ThrowingFileChannel(AtomicBoolean fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) { super(delegate); this.fail = fail; this.partialWrite = partialWrite; + this.throwUnknownException = throwUnknownException; } @Override @@ -1519,19 +1552,27 @@ public class TranslogTests extends ESTestCase { public int write(ByteBuffer src) throws IOException { if (fail.get()) { if (partialWrite) { - if (src.limit() > 1) { + if (src.hasRemaining()) { final int pos = src.position(); final int limit = src.limit(); - src.limit(limit / 2); + src.limit(randomIntBetween(pos, limit)); 
super.write(src); - src.position(pos); src.limit(limit); + src.position(pos); throw new IOException("__FAKE__ no space left on device"); } } - throw new MockDirectoryWrapper.FakeIOException(); + if (throwUnknownException) { + throw new UnknownException(); + } else { + throw new MockDirectoryWrapper.FakeIOException(); + } } return super.write(src); } } + + private static final class UnknownException extends RuntimeException { + + } } From 186242145f98be507ab2041950a8d20458ef245b Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 18 Dec 2015 10:26:57 +0100 Subject: [PATCH 151/167] [TEST] use new settings infra --- .../test/java/org/elasticsearch/gateway/QuorumGatewayIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java index 69c518eb9c6..a817b23949f 100644 --- a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -51,7 +51,7 @@ public class QuorumGatewayIT extends ESIntegTestCase { logger.info("--> starting 3 nodes"); // we are shutting down nodes - make sure we don't have 2 clusters if we test network internalCluster().startNodesAsync(3, - Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2).build()).get(); + Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2).build()).get(); createIndex("test"); From 887789d1a949ab4912251fd320b35ba4164f044c Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 15 Dec 2015 17:32:21 +0100 Subject: [PATCH 152/167] Gradle idea plugin does not properly mark resources directories --- build.gradle | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/build.gradle b/build.gradle index ab40587d5a1..3a94259db97 100644 --- a/build.gradle +++ b/build.gradle @@ -179,6 +179,30 @@ 
gradle.projectsEvaluated { // intellij configuration allprojects { apply plugin: 'idea' + + idea { + module { + // same as for the IntelliJ Gradle tooling integration + inheritOutputDirs = false + outputDir = file('build/classes/main') + testOutputDir = file('build/classes/test') + + iml { + // fix so that Gradle idea plugin properly generates support for resource folders + // see also https://issues.gradle.org/browse/GRADLE-2975 + withXml { + it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/main/resources' }.each { + it.attributes().remove('isTestSource') + it.attributes().put('type', 'java-resource') + } + it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/test/resources' }.each { + it.attributes().remove('isTestSource') + it.attributes().put('type', 'java-test-resource') + } + } + } + } + } } idea { From 15588a499104c73b95df831c74cab79db3f3257b Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 17 Dec 2015 17:45:10 +0000 Subject: [PATCH 153/167] Aggregations: Run pipeline aggregations for empty buckets added in the Range Aggregation Closes #15471 --- .../bucket/histogram/InternalHistogram.java | 22 +++-- .../messy/tests/BucketSelectorTests.java | 86 ++++++++++++++++++- .../messy/tests/ScriptedMetricTests.java | 7 +- 3 files changed, 103 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index a87987452b8..faca359d766 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -391,12 +391,14 @@ public class InternalHistogram extends Inter return reducedBuckets; } - private void addEmptyBuckets(List list) { + private void 
addEmptyBuckets(List list, ReduceContext reduceContext) { B lastBucket = null; ExtendedBounds bounds = emptyBucketInfo.bounds; ListIterator iter = list.listIterator(); // first adding all the empty buckets *before* the actual data (based on th extended_bounds.min the user requested) + InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(emptyBucketInfo.subAggregations), + reduceContext); if (bounds != null) { B firstBucket = iter.hasNext() ? list.get(iter.nextIndex()) : null; if (firstBucket == null) { @@ -404,7 +406,9 @@ public class InternalHistogram extends Inter long key = bounds.min; long max = bounds.max; while (key <= max) { - iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, + keyed, formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } } @@ -413,7 +417,9 @@ public class InternalHistogram extends Inter long key = bounds.min; if (key < firstBucket.key) { while (key < firstBucket.key) { - iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, + keyed, formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } } @@ -428,7 +434,9 @@ public class InternalHistogram extends Inter if (lastBucket != null) { long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key); while (key < nextBucket.key) { - iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, keyed, + formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } assert key == nextBucket.key; @@ -441,7 +449,9 @@ public class InternalHistogram extends Inter long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key); long max = bounds.max; while (key <= max) { - 
iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, keyed, + formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } } @@ -453,7 +463,7 @@ public class InternalHistogram extends Inter // adding empty buckets if needed if (minDocCount == 0) { - addEmptyBuckets(reducedBuckets); + addEmptyBuckets(reducedBuckets, reduceContext); } if (order == InternalOrder.KEY_ASC) { diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BucketSelectorTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BucketSelectorTests.java index 2883b74cc1d..a1faea0b5e5 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BucketSelectorTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BucketSelectorTests.java @@ -45,12 +45,14 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.having; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; @ESIntegTestCase.SuiteScopeTestCase public class BucketSelectorTests extends ESIntegTestCase { @@ -74,6 +76,7 @@ public class BucketSelectorTests extends ESIntegTestCase { public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); createIndex("idx_unmapped"); + 
createIndex("idx_with_gaps"); interval = randomIntBetween(1, 50); numDocs = randomIntBetween(10, 500); @@ -84,6 +87,10 @@ public class BucketSelectorTests extends ESIntegTestCase { for (int docs = 0; docs < numDocs; docs++) { builders.add(client().prepareIndex("idx", "type").setSource(newDocBuilder())); } + builders.add(client().prepareIndex("idx_with_gaps", "type").setSource(newDocBuilder(1, 1, 0, 0))); + builders.add(client().prepareIndex("idx_with_gaps", "type").setSource(newDocBuilder(1, 2, 0, 0))); + builders.add(client().prepareIndex("idx_with_gaps", "type").setSource(newDocBuilder(3, 1, 0, 0))); + builders.add(client().prepareIndex("idx_with_gaps", "type").setSource(newDocBuilder(3, 3, 0, 0))); client().preparePutIndexedScript().setId("my_script").setScriptLang(GroovyScriptEngineService.NAME) .setSource("{ \"script\": \"Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)\" }").get(); @@ -93,12 +100,17 @@ public class BucketSelectorTests extends ESIntegTestCase { } private XContentBuilder newDocBuilder() throws IOException { + return newDocBuilder(randomIntBetween(minNumber, maxNumber), randomIntBetween(minNumber, maxNumber), + randomIntBetween(minNumber, maxNumber), randomIntBetween(minNumber, maxNumber)); + } + + private XContentBuilder newDocBuilder(int field1Value, int field2Value, int field3Value, int field4Value) throws IOException { XContentBuilder jsonBuilder = jsonBuilder(); jsonBuilder.startObject(); - jsonBuilder.field(FIELD_1_NAME, randomIntBetween(minNumber, maxNumber)); - jsonBuilder.field(FIELD_2_NAME, randomIntBetween(minNumber, maxNumber)); - jsonBuilder.field(FIELD_3_NAME, randomIntBetween(minNumber, maxNumber)); - jsonBuilder.field(FIELD_4_NAME, randomIntBetween(minNumber, maxNumber)); + jsonBuilder.field(FIELD_1_NAME, field1Value); + jsonBuilder.field(FIELD_2_NAME, field2Value); + jsonBuilder.field(FIELD_3_NAME, field3Value); + jsonBuilder.field(FIELD_4_NAME, field4Value); jsonBuilder.endObject(); return jsonBuilder; } @@ -451,4 
+463,70 @@ public class BucketSelectorTests extends ESIntegTestCase { assertThat(field2SumValue + field3SumValue, greaterThan(100.0)); } } + + public void testEmptyBuckets() { + SearchResponse response = client().prepareSearch("idx_with_gaps") + .addAggregation(histogram("histo").field(FIELD_1_NAME).interval(1) + .subAggregation(histogram("inner_histo").field(FIELD_1_NAME).interval(1).extendedBounds(1l, 4l).minDocCount(0) + .subAggregation(derivative("derivative").setBucketsPaths("_count").gapPolicy(GapPolicy.INSERT_ZEROS)))) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("1")); + Histogram innerHisto = bucket.getAggregations().get("inner_histo"); + assertThat(innerHisto, notNullValue()); + List innerBuckets = innerHisto.getBuckets(); + assertThat(innerBuckets, notNullValue()); + assertThat(innerBuckets.size(), equalTo(4)); + for (int i = 0; i < innerBuckets.size(); i++) { + Histogram.Bucket innerBucket = innerBuckets.get(i); + if (i == 0) { + assertThat(innerBucket.getAggregations().get("derivative"), nullValue()); + } else { + assertThat(innerBucket.getAggregations().get("derivative"), notNullValue()); + } + } + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("2")); + innerHisto = bucket.getAggregations().get("inner_histo"); + assertThat(innerHisto, notNullValue()); + innerBuckets = innerHisto.getBuckets(); + assertThat(innerBuckets, notNullValue()); + assertThat(innerBuckets.size(), equalTo(4)); + for (int i = 0; i < innerBuckets.size(); i++) { + Histogram.Bucket innerBucket = innerBuckets.get(i); + if (i == 0) { + 
assertThat(innerBucket.getAggregations().get("derivative"), nullValue()); + } else { + assertThat(innerBucket.getAggregations().get("derivative"), notNullValue()); + } + } + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("3")); + innerHisto = bucket.getAggregations().get("inner_histo"); + assertThat(innerHisto, notNullValue()); + innerBuckets = innerHisto.getBuckets(); + assertThat(innerBuckets, notNullValue()); + assertThat(innerBuckets.size(), equalTo(4)); + for (int i = 0; i < innerBuckets.size(); i++) { + Histogram.Bucket innerBucket = innerBuckets.get(i); + if (i == 0) { + assertThat(innerBucket.getAggregations().get("derivative"), nullValue()); + } else { + assertThat(innerBucket.getAggregations().get("derivative"), notNullValue()); + } + } + } } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptedMetricTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptedMetricTests.java index c54510acd4e..98d53c85174 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptedMetricTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptedMetricTests.java @@ -58,7 +58,6 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; @ClusterScope(scope = Scope.SUITE) @@ -739,6 +738,10 @@ public class ScriptedMetricTests extends ESIntegTestCase { ScriptedMetric scriptedMetric = bucket.getAggregations().get("scripted"); assertThat(scriptedMetric, notNullValue()); assertThat(scriptedMetric.getName(), equalTo("scripted")); - assertThat(scriptedMetric.aggregation(), nullValue()); + assertThat(scriptedMetric.aggregation(), notNullValue()); + 
assertThat(scriptedMetric.aggregation(), instanceOf(List.class)); + List aggregationResult = (List) scriptedMetric.aggregation(); + assertThat(aggregationResult.size(), equalTo(1)); + assertThat(aggregationResult.get(0), equalTo(0)); } } From 9d9b557cea945da771688d22992104060f9988a3 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Fri, 11 Dec 2015 18:33:59 -0500 Subject: [PATCH 154/167] Don't allow nodes with missing custom data types to join cluster --- .../elasticsearch/cluster/ClusterState.java | 2 +- .../cluster/metadata/MetaData.java | 2 +- .../discovery/zen/ZenDiscovery.java | 15 ++- .../zen/membership/MembershipAction.java | 30 ++++-- .../discovery/zen/ZenDiscoveryIT.java | 68 ++++++++++-- .../DedicatedClusterSnapshotRestoreIT.java | 82 +------------- .../test/TestCustomMetaData.java | 102 ++++++++++++++++++ 7 files changed, 196 insertions(+), 105 deletions(-) create mode 100644 test/framework/src/main/java/org/elasticsearch/test/TestCustomMetaData.java diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index e20f21b4cec..34ccfd3b433 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -129,7 +129,7 @@ public class ClusterState implements ToXContent, Diffable { @SuppressWarnings("unchecked") T proto = (T)customPrototypes.get(type); if (proto == null) { - throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "]"); + throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "], node likely missing plugins"); } return proto; } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index c84f5b3690c..55cb8a5d944 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ 
b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -134,7 +134,7 @@ public class MetaData implements Iterable, Diffable, Fr //noinspection unchecked T proto = (T) customPrototypes.get(type); if (proto == null) { - throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); + throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "], node likely missing plugins"); } return proto; } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index d69227d8f0b..8849a849f97 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -836,8 +836,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } } - void handleJoinRequest(final DiscoveryNode node, final MembershipAction.JoinCallback callback) { - + void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final MembershipAction.JoinCallback callback) { if (!transportService.addressSupported(node.address().getClass())) { // TODO, what should we do now? Maybe inform that node that its crap? logger.warn("received a wrong address type from [{}], ignoring...", node); @@ -849,7 +848,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen // Sanity check: maybe we don't end up here, because serialization may have failed. 
if (node.getVersion().before(minimumNodeJoinVersion)) { callback.onFailure( - new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]") + new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]") ); return; } @@ -859,7 +858,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen // validate the join request, will throw a failure if it fails, which will get back to the // node calling the join request - membership.sendValidateJoinRequestBlocking(node, joinTimeout); + try { + membership.sendValidateJoinRequestBlocking(node, state, joinTimeout); + } catch (Throwable e) { + logger.warn("failed to validate incoming join request from node [{}]", node); + callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e)); + return; + } nodeJoinController.handleJoinRequest(node, callback); } } @@ -1039,7 +1044,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen private class MembershipListener implements MembershipAction.MembershipListener { @Override public void onJoin(DiscoveryNode node, MembershipAction.JoinCallback callback) { - handleJoinRequest(node, callback); + handleJoinRequest(node, clusterService.state(), callback); } @Override diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java index 4260b992ddb..5a96addc842 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java @@ -20,6 +20,7 @@ package 
org.elasticsearch.discovery.zen.membership; import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; @@ -88,10 +89,6 @@ public class MembershipAction extends AbstractComponent { transportService.submitRequest(masterNode, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(timeout.millis(), TimeUnit.MILLISECONDS); } - public void sendJoinRequest(DiscoveryNode masterNode, DiscoveryNode node) { - transportService.sendRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME); - } - public void sendJoinRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) { transportService.submitRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME) .txGet(timeout.millis(), TimeUnit.MILLISECONDS); @@ -100,8 +97,8 @@ public class MembershipAction extends AbstractComponent { /** * Validates the join request, throwing a failure if it failed. 
*/ - public void sendValidateJoinRequestBlocking(DiscoveryNode node, TimeValue timeout) { - transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(), EmptyTransportResponseHandler.INSTANCE_SAME) + public void sendValidateJoinRequestBlocking(DiscoveryNode node, ClusterState state, TimeValue timeout) { + transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(state), EmptyTransportResponseHandler.INSTANCE_SAME) .txGet(timeout.millis(), TimeUnit.MILLISECONDS); } @@ -156,9 +153,26 @@ public class MembershipAction extends AbstractComponent { } } - public static class ValidateJoinRequest extends TransportRequest { + class ValidateJoinRequest extends TransportRequest { + private ClusterState state; - public ValidateJoinRequest() { + ValidateJoinRequest() { + } + + ValidateJoinRequest(ClusterState state) { + this.state = state; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.state = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + this.state.writeTo(out); } } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index 3b6708630ee..217e86526cc 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -24,10 +24,8 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; -import 
org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; @@ -45,6 +43,7 @@ import org.elasticsearch.discovery.zen.fd.FaultDetection; import org.elasticsearch.discovery.zen.membership.MembershipAction; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.TestCustomMetaData; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesTransportRequest; @@ -57,9 +56,7 @@ import org.hamcrest.Matchers; import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; +import java.util.*; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; @@ -228,16 +225,69 @@ public class ZenDiscoveryIT extends ESIntegTestCase { assertThat(ExceptionsHelper.detailedMessage(reference.get()), containsString("cluster state from a different master than the current one, rejecting")); } + public void testHandleNodeJoin_incompatibleClusterState() throws UnknownHostException { + Settings nodeSettings = Settings.settingsBuilder() + .put("discovery.type", "zen") // <-- To override the local setting if set externally + .build(); + String masterOnlyNode = internalCluster().startMasterOnlyNode(nodeSettings); + String node1 = internalCluster().startNode(nodeSettings); + ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, masterOnlyNode); + ClusterService clusterService = 
internalCluster().getInstance(ClusterService.class, node1); + final ClusterState state = clusterService.state(); + MetaData.Builder mdBuilder = MetaData.builder(state.metaData()); + mdBuilder.putCustom(CustomMetaData.TYPE, new CustomMetaData("data")); + ClusterState stateWithCustomMetaData = ClusterState.builder(state).metaData(mdBuilder).build(); + + final AtomicReference holder = new AtomicReference<>(); + DiscoveryNode node = state.nodes().localNode(); + zenDiscovery.handleJoinRequest(node, stateWithCustomMetaData, new MembershipAction.JoinCallback() { + @Override + public void onSuccess() { + } + + @Override + public void onFailure(Throwable t) { + holder.set((IllegalStateException) t); + } + }); + + assertThat(holder.get(), notNullValue()); + assertThat(holder.get().getMessage(), equalTo("failure when sending a validation request to node")); + } + + public static class CustomMetaData extends TestCustomMetaData { + public static final String TYPE = "custom_md"; + + CustomMetaData(String data) { + super(data); + } + + @Override + protected TestCustomMetaData newTestCustomMetaData(String data) { + return new CustomMetaData(data); + } + + @Override + public String type() { + return TYPE; + } + + @Override + public EnumSet context() { + return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT); + } + } + public void testHandleNodeJoin_incompatibleMinVersion() throws UnknownHostException { Settings nodeSettings = Settings.settingsBuilder() .put("discovery.type", "zen") // <-- To override the local setting if set externally .build(); String nodeName = internalCluster().startNode(nodeSettings, Version.V_2_0_0_beta1); ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName); - + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName); DiscoveryNode node = new DiscoveryNode("_node_id", new InetSocketTransportAddress(InetAddress.getByName("0.0.0.0"), 0), 
Version.V_1_6_0); final AtomicReference holder = new AtomicReference<>(); - zenDiscovery.handleJoinRequest(node, new MembershipAction.JoinCallback() { + zenDiscovery.handleJoinRequest(node, clusterService.state(), new MembershipAction.JoinCallback() { @Override public void onSuccess() { } diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 9133828c070..7946116f571 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -22,7 +22,6 @@ package org.elasticsearch.snapshots; import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -33,24 +32,17 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import 
org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.index.store.IndexStore; @@ -68,9 +60,9 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.TestCustomMetaData; import org.elasticsearch.test.rest.FakeRestRequest; -import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; @@ -902,78 +894,6 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest )); } - public static abstract class TestCustomMetaData extends AbstractDiffable implements MetaData.Custom { - private final String data; - - protected TestCustomMetaData(String data) { - this.data = data; - } - - public String getData() { - return data; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - TestCustomMetaData that = (TestCustomMetaData) o; - - if (!data.equals(that.data)) return false; - - return true; - } - - @Override - public int hashCode() { - return data.hashCode(); - } - - protected abstract TestCustomMetaData newTestCustomMetaData(String data); - - @Override - public Custom readFrom(StreamInput in) throws IOException { - return newTestCustomMetaData(in.readString()); - } - - @Override - public void writeTo(StreamOutput out) 
throws IOException { - out.writeString(getData()); - } - - @Override - public Custom fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - String data = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if ("data".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); - } - data = parser.text(); - } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [{}]", currentFieldName); - } - } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata"); - } - } - if (data == null) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); - } - return newTestCustomMetaData(data); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.field("data", getData()); - return builder; - } - } - static { MetaData.registerPrototype(SnapshottableMetadata.TYPE, SnapshottableMetadata.PROTO); diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetaData.java b/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetaData.java new file mode 100644 index 00000000000..92d5b95cfac --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetaData.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public abstract class TestCustomMetaData extends AbstractDiffable implements MetaData.Custom { + private final String data; + + protected TestCustomMetaData(String data) { + this.data = data; + } + + public String getData() { + return data; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + TestCustomMetaData that = (TestCustomMetaData) o; + + if (!data.equals(that.data)) return false; + + return true; + } + + @Override + public int hashCode() { + return data.hashCode(); + } + + protected abstract TestCustomMetaData newTestCustomMetaData(String data); + + @Override + public MetaData.Custom readFrom(StreamInput in) throws IOException { + return newTestCustomMetaData(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(getData()); + } + + @Override + public MetaData.Custom fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + String data = null; + while ((token = parser.nextToken()) != 
XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if ("data".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); + } + data = parser.text(); + } else { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [{}]", currentFieldName); + } + } else { + throw new ElasticsearchParseException("failed to parse snapshottable metadata"); + } + } + if (data == null) { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); + } + return newTestCustomMetaData(data); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("data", getData()); + return builder; + } +} From 94d6b221372810b3d188bc9c6d51ab0302d98f4b Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 18 Dec 2015 11:43:58 -0500 Subject: [PATCH 155/167] add gradle licenseHeaders to precommit This is a port of the logic from apache lucene that uses Rat --- build.gradle | 5 +- .../precommit/LicenseHeadersTask.groovy | 156 ++++++++++++++++++ .../gradle/precommit/PrecommitTasks.groovy | 1 + .../precommit/ThirdPartyAuditTask.groovy | 2 +- 4 files changed, 161 insertions(+), 3 deletions(-) create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy diff --git a/build.gradle b/build.gradle index ab40587d5a1..204986cea04 100644 --- a/build.gradle +++ b/build.gradle @@ -128,10 +128,11 @@ subprojects { // Only if your buildscript and Ant's optional task need the same library would you have to define it twice. 
// https://docs.gradle.org/current/userguide/organizing_build_logic.html configurations { - forbiddenApis + buildTools } dependencies { - forbiddenApis 'de.thetaphi:forbiddenapis:2.0' + buildTools 'de.thetaphi:forbiddenapis:2.0' + buildTools 'org.apache.rat:apache-rat:0.11' } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy new file mode 100644 index 00000000000..7c57fd23c23 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.precommit + +import java.nio.file.Files + +import org.gradle.api.DefaultTask +import org.gradle.api.tasks.SourceSet +import org.gradle.api.tasks.TaskAction + +import groovy.xml.NamespaceBuilder +import groovy.xml.NamespaceBuilderSupport + +/** + * Checks files for license headers. + *

+ * This is a port of the apache lucene check + */ +public class LicenseHeadersTask extends DefaultTask { + + LicenseHeadersTask() { + description = "Checks sources for missing, incorrect, or unacceptable license headers" + } + + @TaskAction + public void check() { + // load rat tasks + AntBuilder ant = new AntBuilder() + ant.typedef(resource: "org/apache/rat/anttasks/antlib.xml", + uri: "antlib:org.apache.rat.anttasks", + classpath: project.configurations.buildTools.asPath) + NamespaceBuilderSupport rat = NamespaceBuilder.newInstance(ant, "antlib:org.apache.rat.anttasks") + + // create a file for the log to go to under reports/ + File reportDir = new File(project.buildDir, "reports/licenseHeaders") + reportDir.mkdirs() + File reportFile = new File(reportDir, "rat.log") + Files.deleteIfExists(reportFile.toPath()) + + // run rat, going to the file + rat.report(reportFile: reportFile.absolutePath, addDefaultLicenseMatchers: true) { + // checks all the java sources (allJava) + for (SourceSet set : project.sourceSets) { + for (File dir : set.allJava.srcDirs) { + ant.fileset(dir: dir) + } + } + + // BSD 4-clause stuff (is disallowed below) + substringMatcher(licenseFamilyCategory: "BSD4 ", + licenseFamilyName: "Original BSD License (with advertising clause)") { + pattern(substring: "All advertising materials") + } + + // BSD-like stuff + substringMatcher(licenseFamilyCategory: "BSD ", + licenseFamilyName: "Modified BSD License") { + // brics automaton + pattern(substring: "Copyright (c) 2001-2009 Anders Moeller") + // snowball + pattern(substring: "Copyright (c) 2001, Dr Martin Porter") + // UMASS kstem + pattern(substring: "THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS") + // Egothor + pattern(substring: "Egothor Software License version 1.00") + // JaSpell + pattern(substring: "Copyright (c) 2005 Bruno Martins") + // d3.js + pattern(substring: "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS") + // highlight.js + 
pattern(substring: "THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS") + } + + // MIT-like + substringMatcher(licenseFamilyCategory: "MIT ", + licenseFamilyName: "The MIT License") { + // ICU license + pattern(substring: "Permission is hereby granted, free of charge, to any person obtaining a copy") + } + + // Apache + substringMatcher(licenseFamilyCategory: "AL ", + licenseFamilyName: "Apache") { + // Apache license (ES) + pattern(substring: "Licensed to Elasticsearch under one or more contributor") + // Apache license (ASF) + pattern(substring: "Licensed to the Apache Software Foundation (ASF) under") + // this is the old-school one under some files + pattern(substring: "Licensed under the Apache License, Version 2.0 (the \"License\")") + } + + // Generated resources + substringMatcher(licenseFamilyCategory: "GEN ", + licenseFamilyName: "Generated") { + // svg files generated by gnuplot + pattern(substring: "Produced by GNUPLOT") + // snowball stemmers generated by snowball compiler + pattern(substring: "This file was generated automatically by the Snowball to Java compiler") + // uima tests generated by JCasGen + pattern(substring: "First created by JCasGen") + // parsers generated by antlr + pattern(substring: "ANTLR GENERATED CODE") + } + + // approved categories + approvedLicense(familyName: "Apache") + approvedLicense(familyName: "The MIT License") + approvedLicense(familyName: "Modified BSD License") + approvedLicense(familyName: "Generated") + } + + // check the license file for any errors, this should be fast. + boolean zeroUnknownLicenses = false + boolean foundProblemsWithFiles = false + reportFile.eachLine('UTF-8') { line -> + if (line.startsWith("0 Unknown Licenses")) { + zeroUnknownLicenses = true + } + + if (line.startsWith(" !")) { + foundProblemsWithFiles = true + } + } + + if (zeroUnknownLicenses == false || foundProblemsWithFiles) { + // print the unapproved license section, usually its all you need to fix problems. 
+ int sectionNumber = 0 + reportFile.eachLine('UTF-8') { line -> + if (line.startsWith("*******************************")) { + sectionNumber++ + } else { + if (sectionNumber == 2) { + logger.error(line) + } + } + } + throw new IllegalStateException("License header problems were found! Full details: " + reportFile.absolutePath) + } + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index ef2a49cc444..f99032e1e2d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -34,6 +34,7 @@ class PrecommitTasks { List precommitTasks = [ configureForbiddenApis(project), project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), + project.tasks.create('licenseHeaders', LicenseHeadersTask.class), project.tasks.create('jarHell', JarHellTask.class), project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)] diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index 814f9c1e60b..45ab2300449 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -114,7 +114,7 @@ public class ThirdPartyAuditTask extends DefaultTask { ant.taskdef(name: "thirdPartyAudit", classname: "de.thetaphi.forbiddenapis.ant.AntTask", - classpath: project.configurations.forbiddenApis.asPath) + classpath: project.configurations.buildTools.asPath) // print which jars we are going to scan, always // this is not the time to try to be succinct! Forbidden will print plenty on its own! 
From e82808917c98c9ea2f3c8f9c337e5ec3558673d1 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 18 Dec 2015 11:58:18 -0500 Subject: [PATCH 156/167] don't fail on missing source dirs --- .../elasticsearch/gradle/precommit/LicenseHeadersTask.groovy | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy index 7c57fd23c23..49687d933e3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy @@ -58,7 +58,10 @@ public class LicenseHeadersTask extends DefaultTask { // checks all the java sources (allJava) for (SourceSet set : project.sourceSets) { for (File dir : set.allJava.srcDirs) { - ant.fileset(dir: dir) + // sometimes these dirs don't exist, e.g. site-plugin has no actual java src/main... + if (dir.exists()) { + ant.fileset(dir: dir) + } } } From c4f823903a5afecb20227f5f000696c093e4f163 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 18 Dec 2015 12:00:52 -0500 Subject: [PATCH 157/167] fix indent --- .../elasticsearch/gradle/precommit/LicenseHeadersTask.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy index 49687d933e3..e680a546f5b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy @@ -155,5 +155,5 @@ public class LicenseHeadersTask extends DefaultTask { } throw new IllegalStateException("License header problems were found! 
Full details: " + reportFile.absolutePath) } - } + } } From 2e2e328879fd712d4b291dff22dd0c6bb8d2ec70 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 18 Dec 2015 13:02:39 -0500 Subject: [PATCH 158/167] add missing license header --- .../mapper/attachments/TikaImplTests.java | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaImplTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaImplTests.java index fc17d59603f..f42110c1e62 100644 --- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaImplTests.java +++ b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaImplTests.java @@ -1,5 +1,24 @@ package org.elasticsearch.mapper.attachments; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + import org.elasticsearch.test.ESTestCase; public class TikaImplTests extends ESTestCase { From 447729f0e18186a775ce5f8c279f9ce1b292d2c0 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 18 Dec 2015 13:08:17 -0500 Subject: [PATCH 159/167] add missing license headers --- .../plugin/hadoop/hdfs/Utils.java | 21 ++++++++++++++++++- .../hadoop/hdfs/HdfsRepositoryRestIT.java | 19 +++++++++++++++++ .../plugin/hadoop/hdfs/HdfsTestPlugin.java | 19 +++++++++++++++++ .../plugin/hadoop/hdfs/UtilsTests.java | 19 +++++++++++++++++ 4 files changed, 77 insertions(+), 1 deletion(-) diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/Utils.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/Utils.java index 101025d029e..cf786179787 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/Utils.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/Utils.java @@ -1,5 +1,24 @@ package org.elasticsearch.plugin.hadoop.hdfs; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + import java.net.URL; import java.security.AccessControlContext; import java.security.AccessController; @@ -81,4 +100,4 @@ public abstract class Utils { return base; } -} \ No newline at end of file +} diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsRepositoryRestIT.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsRepositoryRestIT.java index fd87e18cbce..8d8d6755cc3 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsRepositoryRestIT.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsRepositoryRestIT.java @@ -1,5 +1,24 @@ package org.elasticsearch.plugin.hadoop.hdfs; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + import java.io.IOException; import java.util.Collection; diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTestPlugin.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTestPlugin.java index 4b4e2aa05ef..8730a46a084 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTestPlugin.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTestPlugin.java @@ -1,5 +1,24 @@ package org.elasticsearch.plugin.hadoop.hdfs; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + import java.net.URL; import java.util.Collections; import java.util.List; diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/UtilsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/UtilsTests.java index 2f492eee343..37aecb04b9b 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/UtilsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/UtilsTests.java @@ -1,5 +1,24 @@ package org.elasticsearch.plugin.hadoop.hdfs; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + import org.elasticsearch.test.ESTestCase; public class UtilsTests extends ESTestCase { From 6ba374d6326f59b801c01698c3ec9f5a64d20ac0 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 18 Dec 2015 13:15:34 -0500 Subject: [PATCH 160/167] add missing license headers --- .../pipelining/HttpPipeliningHandler.java | 22 +++++++++++++++++++ .../OrderedDownstreamChannelEvent.java | 22 +++++++++++++++++++ .../OrderedUpstreamMessageEvent.java | 22 +++++++++++++++++++ 3 files changed, 66 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java index 10008c76a54..4bcbf4079c0 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java +++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java @@ -1,5 +1,27 @@ package org.elasticsearch.http.netty.pipelining; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// this file is from netty-http-pipelining, under apache 2.0 license +// see github.com/typesafehub/netty-http-pipelining + import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.jboss.netty.channel.*; diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java index 6b713a08020..622a3e6ac9f 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java +++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java @@ -1,5 +1,27 @@ package org.elasticsearch.http.netty.pipelining; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// this file is from netty-http-pipelining, under apache 2.0 license +// see github.com/typesafehub/netty-http-pipelining + import org.jboss.netty.channel.*; /** diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java index 7343b29b6c5..cc47b5be320 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java +++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java @@ -1,5 +1,27 @@ package org.elasticsearch.http.netty.pipelining; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// this file is from netty-http-pipelining, under apache 2.0 license +// see github.com/typesafehub/netty-http-pipelining + import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.UpstreamMessageEvent; From 266bece2fe7cfbaf66ef91cf635b464fbf403189 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 18 Dec 2015 13:19:09 -0500 Subject: [PATCH 161/167] add missing license headers --- .../profile/InternalProfileShardResults.java | 19 +++++++++++++++++++ .../master/IndexingMasterFailoverIT.java | 19 +++++++++++++++++++ .../cluster/routing/PrimaryAllocationIT.java | 19 +++++++++++++++++++ .../allocation/ActiveAllocationIdTests.java | 19 +++++++++++++++++++ 4 files changed, 76 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java index 2ab3b632119..e6052ff5095 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java +++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java @@ -1,5 +1,24 @@ package org.elasticsearch.search.profile; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; diff --git a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 6946e35861c..b0c13f851a6 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -1,5 +1,24 @@ package org.elasticsearch.action.support.master; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index dcd35303b75..340fdcc3c99 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -1,5 +1,24 @@ package org.elasticsearch.cluster.routing; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java index 7a7f4722e97..2e54512b95f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java @@ -1,5 +1,24 @@ package org.elasticsearch.cluster.routing.allocation; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; From 56e4752d28a42834ef7e3faccd85b5c4259296e7 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Fri, 18 Dec 2015 08:19:40 +0100 Subject: [PATCH 162/167] Align handling of interrupts in BulkProcessor With this commit we implement a cancellation policy in BulkProcessor which is aligned for the sync and the async case and also document it. Closes #14833. 
--- .../elasticsearch/action/bulk/BulkProcessor.java | 3 +++ .../action/bulk/BulkRequestHandler.java | 16 +++++++++++----- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index af5af80ac2f..43014cfb759 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -61,6 +61,9 @@ public class BulkProcessor implements Closeable { /** * Callback after a failed execution of bulk request. + * + * Note that in case an instance of InterruptedException is passed, which means that request processing has been + * cancelled externally, the thread's interruption status has been restored prior to calling this method. */ void afterBulk(long executionId, BulkRequest request, Throwable failure); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java index ffc985bd510..dc98a16c578 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -74,11 +74,17 @@ abstract class BulkRequestHandler { .withSyncBackoff(client, bulkRequest); afterCalled = true; listener.afterBulk(executionId, bulkRequest, bulkResponse); - } catch (Exception e) { + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.info("Bulk request {} has been cancelled.", e, executionId); if (!afterCalled) { - logger.warn("Failed to executed bulk request {}.", e, executionId); listener.afterBulk(executionId, bulkRequest, e); } + } catch (Throwable t) { + logger.warn("Failed to execute bulk request {}.", t, executionId); + if (!afterCalled) { + listener.afterBulk(executionId, bulkRequest, t); + } } } @@ -135,11 +141,11 @@ abstract class 
BulkRequestHandler { }); bulkRequestSetupSuccessful = true; } catch (InterruptedException e) { - // This is intentionally wrong to avoid changing the behaviour implicitly with this PR. It will be fixed in #14833 - Thread.interrupted(); + Thread.currentThread().interrupt(); + logger.info("Bulk request {} has been cancelled.", e, executionId); listener.afterBulk(executionId, bulkRequest, e); } catch (Throwable t) { - logger.warn("Failed to executed bulk request {}.", t, executionId); + logger.warn("Failed to execute bulk request {}.", t, executionId); listener.afterBulk(executionId, bulkRequest, t); } finally { if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore From e51904fa002c6a8489c1bbfd9e325fa6e10e02bc Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Fri, 18 Dec 2015 07:52:33 +0100 Subject: [PATCH 163/167] Document usage of backoff policy in BulkProcessor With this commit we update the documentation to explain the new backoff feature in BulkProcessor. Relates to #14620. 
--- docs/java-api/docs/bulk.asciidoc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/java-api/docs/bulk.asciidoc b/docs/java-api/docs/bulk.asciidoc index 6890f7c49d3..248326700c4 100644 --- a/docs/java-api/docs/bulk.asciidoc +++ b/docs/java-api/docs/bulk.asciidoc @@ -47,6 +47,7 @@ To use it, first create a `BulkProcessor` instance: [source,java] -------------------------------------------------- +import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkProcessor; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -73,6 +74,8 @@ BulkProcessor bulkProcessor = BulkProcessor.builder( .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) <6> .setFlushInterval(TimeValue.timeValueSeconds(5)) <7> .setConcurrentRequests(1) <8> + .setBackoffPolicy( + BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3)) <9> .build(); -------------------------------------------------- <1> Add your elasticsearch client @@ -86,6 +89,10 @@ BulkProcessor bulkProcessor = BulkProcessor.builder( <7> We want to flush the bulk every 5 seconds whatever the number of requests <8> Set the number of concurrent requests. A value of 0 means that only a single request will be allowed to be executed. A value of 1 means 1 concurrent request is allowed to be executed while accumulating new bulk requests. +<9> Set a custom backoff policy which will initially wait for 100ms, increase exponentially and retries up to three + times. A retry is attempted whenever one or more bulk item requests have failed with an `EsRejectedExecutionException` + which indicates that there were too little compute resources available for processing the request. To disable backoff, + pass `BackoffPolicy.noBackoff()`. 
Then you can simply add your requests to the `BulkProcessor`: @@ -101,6 +108,7 @@ By default, `BulkProcessor`: * sets bulkSize to `5mb` * does not set flushInterval * sets concurrentRequests to 1 +* sets backoffPolicy to an exponential backoff with 8 retries and a start delay of 50ms. The total wait time is roughly 5.1 seconds. When all documents are loaded to the `BulkProcessor` it can be closed by using `awaitClose` or `close` methods: From 2ce54640f58942d6c78aa25fc9eebc4f5afbd85a Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 18 Dec 2015 13:46:59 -0500 Subject: [PATCH 164/167] Remove unnecessary license categories/matchers --- .../precommit/LicenseHeadersTask.groovy | 35 +------------------ 1 file changed, 1 insertion(+), 34 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy index e680a546f5b..580c089461a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy @@ -66,37 +66,12 @@ public class LicenseHeadersTask extends DefaultTask { } // BSD 4-clause stuff (is disallowed below) + // we keep this here, in case someone adds BSD code for some reason, it should never be allowed. 
substringMatcher(licenseFamilyCategory: "BSD4 ", licenseFamilyName: "Original BSD License (with advertising clause)") { pattern(substring: "All advertising materials") } - // BSD-like stuff - substringMatcher(licenseFamilyCategory: "BSD ", - licenseFamilyName: "Modified BSD License") { - // brics automaton - pattern(substring: "Copyright (c) 2001-2009 Anders Moeller") - // snowball - pattern(substring: "Copyright (c) 2001, Dr Martin Porter") - // UMASS kstem - pattern(substring: "THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS") - // Egothor - pattern(substring: "Egothor Software License version 1.00") - // JaSpell - pattern(substring: "Copyright (c) 2005 Bruno Martins") - // d3.js - pattern(substring: "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS") - // highlight.js - pattern(substring: "THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS") - } - - // MIT-like - substringMatcher(licenseFamilyCategory: "MIT ", - licenseFamilyName: "The MIT License") { - // ICU license - pattern(substring: "Permission is hereby granted, free of charge, to any person obtaining a copy") - } - // Apache substringMatcher(licenseFamilyCategory: "AL ", licenseFamilyName: "Apache") { @@ -111,20 +86,12 @@ public class LicenseHeadersTask extends DefaultTask { // Generated resources substringMatcher(licenseFamilyCategory: "GEN ", licenseFamilyName: "Generated") { - // svg files generated by gnuplot - pattern(substring: "Produced by GNUPLOT") - // snowball stemmers generated by snowball compiler - pattern(substring: "This file was generated automatically by the Snowball to Java compiler") - // uima tests generated by JCasGen - pattern(substring: "First created by JCasGen") // parsers generated by antlr pattern(substring: "ANTLR GENERATED CODE") } // approved categories approvedLicense(familyName: "Apache") - approvedLicense(familyName: "The MIT License") - approvedLicense(familyName: "Modified BSD License") 
approvedLicense(familyName: "Generated") } From 9f1dfdbaeaf7bdcd7aa4bb0679d5bb4d8dd096fb Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 18 Dec 2015 10:59:07 -0800 Subject: [PATCH 165/167] Build: Add AntTask to simplify controlling logging when running ant from gradle This new task allows setting code, similar to a doLast or doFirst, except it is specifically geared at running ant (and thus called doAnt). It adjusts the ant logging while running the ant so that the log level/behavior can be tweaked, and automatically buffers based on gradle logging level, and dumps the ant output upon failure. --- build.gradle | 11 -- buildSrc/build.gradle | 1 + .../org/elasticsearch/gradle/AntTask.groovy | 111 ++++++++++++++++++ .../precommit/LicenseHeadersTask.groovy | 31 +++-- .../precommit/ThirdPartyAuditTask.groovy | 73 ++++++------ 5 files changed, 166 insertions(+), 61 deletions(-) create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy diff --git a/build.gradle b/build.gradle index 3d4067c1703..c31fe88f5d2 100644 --- a/build.gradle +++ b/build.gradle @@ -123,17 +123,6 @@ subprojects { } } } - // For reasons we don't fully understand yet, external dependencies are not picked up by Ant's optional tasks. - // But you can easily do it in another way. - // Only if your buildscript and Ant's optional task need the same library would you have to define it twice. - // https://docs.gradle.org/current/userguide/organizing_build_logic.html - configurations { - buildTools - } - dependencies { - buildTools 'de.thetaphi:forbiddenapis:2.0' - buildTools 'org.apache.rat:apache-rat:0.11' - } } // Ensure similar tasks in dependent projects run first. The projectsEvaluated here is diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index e46f9cb33c0..a0f06343d30 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -63,6 +63,7 @@ dependencies { compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE.... 
compile 'de.thetaphi:forbiddenapis:2.0' compile 'com.bmuschko:gradle-nexus-plugin:2.3.1' + compile 'org.apache.rat:apache-rat:0.11' } processResources { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy new file mode 100644 index 00000000000..1b904150077 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle + +import org.apache.tools.ant.BuildException +import org.apache.tools.ant.BuildListener +import org.apache.tools.ant.BuildLogger +import org.apache.tools.ant.DefaultLogger +import org.apache.tools.ant.Project +import org.gradle.api.DefaultTask +import org.gradle.api.GradleException +import org.gradle.api.tasks.Input +import org.gradle.api.tasks.Optional +import org.gradle.api.tasks.TaskAction + +import java.nio.charset.Charset + +/** + * A task which will run ant commands. + * + * Logging for the task is customizable for subclasses by overriding makeLogger. 
+ */ +public class AntTask extends DefaultTask { + + /** + * A buffer that will contain the output of the ant code run, + * if the output was not already written directly to stdout. + */ + public final ByteArrayOutputStream outputBuffer = new ByteArrayOutputStream() + + @TaskAction + final void executeTask() { + // capture the current loggers + List savedLoggers = new ArrayList<>(); + for (BuildListener l : project.ant.project.getBuildListeners()) { + if (l instanceof BuildLogger) { + savedLoggers.add(l); + } + } + // remove them + for (BuildLogger l : savedLoggers) { + project.ant.project.removeBuildListener(l) + } + + final int outputLevel = logger.isDebugEnabled() ? Project.MSG_DEBUG : Project.MSG_INFO + final PrintStream stream = useStdout() ? System.out : new PrintStream(outputBuffer, true, Charset.defaultCharset().name()) + BuildLogger antLogger = makeLogger(stream, outputLevel) + + // now run the command with just our logger + project.ant.project.addBuildListener(antLogger) + try { + runAnt(project.ant) + } catch (BuildException e) { + // ant failed, so see if we have buffered output to emit, then rethrow the failure + String buffer = outputBuffer.toString() + if (buffer.isEmpty() == false) { + logger.error("=== Ant output ===\n${buffer}") + } + throw e + } finally { + project.ant.project.removeBuildListener(antLogger) + // add back the old loggers before returning + for (BuildLogger l : savedLoggers) { + project.ant.project.addBuildListener(l) + } + } + } + + /** Runs the doAnt closure. This can be overridden by subclasses instead of having to set a closure. */ + protected void runAnt(AntBuilder ant) { + if (doAnt == null) { + throw new GradleException("Missing doAnt for ${name}") + } + doAnt(ant) + } + + /** Create the logger the ant runner will use, with the given stream for error/output. 
*/ + protected BuildLogger makeLogger(PrintStream stream, int outputLevel) { + return new DefaultLogger( + errorPrintStream: stream, + outputPrintStream: stream, + messageOutputLevel: outputLevel) + } + + /** + * Returns true if the ant logger should write to stdout, or false if to the buffer. + * The default implementation writes to the buffer when gradle info logging is disabled. + */ + protected boolean useStdout() { + return logger.isInfoEnabled() + } + + +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy index 580c089461a..de36af886b8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy @@ -18,34 +18,33 @@ */ package org.elasticsearch.gradle.precommit -import java.nio.file.Files - -import org.gradle.api.DefaultTask +import org.apache.rat.anttasks.Report +import org.apache.rat.anttasks.SubstringLicenseMatcher +import org.apache.rat.license.SimpleLicenseFamily +import org.elasticsearch.gradle.AntTask import org.gradle.api.tasks.SourceSet -import org.gradle.api.tasks.TaskAction -import groovy.xml.NamespaceBuilder -import groovy.xml.NamespaceBuilderSupport +import java.nio.file.Files /** * Checks files for license headers. *

* This is a port of the apache lucene check */ -public class LicenseHeadersTask extends DefaultTask { +public class LicenseHeadersTask extends AntTask { LicenseHeadersTask() { description = "Checks sources for missing, incorrect, or unacceptable license headers" + + if (ant.project.taskDefinitions.contains('ratReport') == false) { + ant.project.addTaskDefinition('ratReport', Report) + ant.project.addDataTypeDefinition('substringMatcher', SubstringLicenseMatcher) + ant.project.addDataTypeDefinition('approvedLicense', SimpleLicenseFamily) + } } - @TaskAction - public void check() { - // load rat tasks - AntBuilder ant = new AntBuilder() - ant.typedef(resource: "org/apache/rat/anttasks/antlib.xml", - uri: "antlib:org.apache.rat.anttasks", - classpath: project.configurations.buildTools.asPath) - NamespaceBuilderSupport rat = NamespaceBuilder.newInstance(ant, "antlib:org.apache.rat.anttasks") + @Override + protected void runAnt(AntBuilder ant) { // create a file for the log to go to under reports/ File reportDir = new File(project.buildDir, "reports/licenseHeaders") @@ -54,7 +53,7 @@ public class LicenseHeadersTask extends DefaultTask { Files.deleteIfExists(reportFile.toPath()) // run rat, going to the file - rat.report(reportFile: reportFile.absolutePath, addDefaultLicenseMatchers: true) { + ant.ratReport(reportFile: reportFile.absolutePath, addDefaultLicenseMatchers: true) { // checks all the java sources (allJava) for (SourceSet set : project.sourceSets) { for (File dir : set.allJava.srcDirs) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index 45ab2300449..9ff348f20bb 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -18,6 +18,10 @@ */ package org.elasticsearch.gradle.precommit 
+import org.apache.tools.ant.DefaultLogger +import org.elasticsearch.gradle.AntTask +import org.gradle.api.artifacts.Configuration + import java.nio.file.Files import java.nio.file.FileVisitResult import java.nio.file.Path @@ -35,7 +39,7 @@ import org.apache.tools.ant.Project /** * Basic static checking to keep tabs on third party JARs */ -public class ThirdPartyAuditTask extends DefaultTask { +public class ThirdPartyAuditTask extends AntTask { // true to be lenient about MISSING CLASSES private boolean missingClasses; @@ -46,6 +50,10 @@ public class ThirdPartyAuditTask extends DefaultTask { ThirdPartyAuditTask() { dependsOn(project.configurations.testCompile) description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors'" + + if (ant.project.taskDefinitions.contains('thirdPartyAudit') == false) { + ant.project.addTaskDefinition('thirdPartyAudit', de.thetaphi.forbiddenapis.ant.AntTask) + } } /** @@ -84,38 +92,35 @@ public class ThirdPartyAuditTask extends DefaultTask { return excludes; } - @TaskAction - public void check() { - AntBuilder ant = new AntBuilder() + @Override + protected BuildLogger makeLogger(PrintStream stream, int outputLevel) { + return new DefaultLogger( + errorPrintStream: stream, + outputPrintStream: stream, + // ignore passed in outputLevel for now, until we are filtering warning messages + messageOutputLevel: Project.MSG_ERR) + } - // we are noisy for many reasons, working around performance problems with forbidden-apis, dealing - // with warnings about missing classes, etc. so we use our own "quiet" AntBuilder - ant.project.buildListeners.each { listener -> - if (listener instanceof BuildLogger) { - listener.messageOutputLevel = Project.MSG_ERR; - } - }; - + @Override + protected void runAnt(AntBuilder ant) { // we only want third party dependencies. 
- FileCollection jars = project.configurations.testCompile.fileCollection({ dependency -> + FileCollection jars = project.configurations.testCompile.fileCollection({ dependency -> dependency.group.startsWith("org.elasticsearch") == false }) - + // we don't want provided dependencies, which we have already scanned. e.g. don't // scan ES core's dependencies for every single plugin - try { - jars -= project.configurations.getByName("provided") - } catch (UnknownConfigurationException ignored) {} - + Configuration provided = project.configurations.findByName('provided') + if (provided != null) { + jars -= provided + } + // no dependencies matched, we are done if (jars.isEmpty()) { return; } - - ant.taskdef(name: "thirdPartyAudit", - classname: "de.thetaphi.forbiddenapis.ant.AntTask", - classpath: project.configurations.buildTools.asPath) - + + // print which jars we are going to scan, always // this is not the time to try to be succinct! Forbidden will print plenty on its own! Set names = new HashSet<>() @@ -123,26 +128,26 @@ public class ThirdPartyAuditTask extends DefaultTask { names.add(jar.getName()) } logger.error("[thirdPartyAudit] Scanning: " + names) - + // warn that classes are missing // TODO: move these to excludes list! if (missingClasses) { logger.warn("[thirdPartyAudit] WARNING: CLASSES ARE MISSING! Expect NoClassDefFoundError in bug reports from users!") } - - // TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first, + + // TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first, // and then remove our temp dir afterwards. don't complain: try it yourself. // we don't use gradle temp dir handling, just google it, or try it yourself. 
- + File tmpDir = new File(project.buildDir, 'tmp/thirdPartyAudit') - + // clean up any previous mess (if we failed), then unzip everything to one directory ant.delete(dir: tmpDir.getAbsolutePath()) tmpDir.mkdirs() for (File jar : jars) { ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath()) } - + // convert exclusion class names to binary file names String[] excludedFiles = new String[excludes.length]; for (int i = 0; i < excludes.length; i++) { @@ -152,12 +157,12 @@ public class ThirdPartyAuditTask extends DefaultTask { throw new IllegalStateException("bogus thirdPartyAudit exclusion: '" + excludes[i] + "', not found in any dependency") } } - + // jarHellReprise checkSheistyClasses(tmpDir.toPath(), new HashSet<>(Arrays.asList(excludedFiles))); - - ant.thirdPartyAudit(internalRuntimeForbidden: true, - failOnUnsupportedJava: false, + + ant.thirdPartyAudit(internalRuntimeForbidden: true, + failOnUnsupportedJava: false, failOnMissingClasses: !missingClasses, classpath: project.configurations.testCompile.asPath) { fileset(dir: tmpDir, excludes: excludedFiles.join(',')) @@ -169,7 +174,7 @@ public class ThirdPartyAuditTask extends DefaultTask { /** * check for sheisty classes: if they also exist in the extensions classloader, its jar hell with the jdk! */ - private void checkSheistyClasses(Path root, Set excluded) { + protected void checkSheistyClasses(Path root, Set excluded) { // system.parent = extensions loader. // note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!). // but groovy/gradle needs to work at all first! 
From 5b9bf8e73862461d9a1c9fcdf9fd8e7f4af7bf3b Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 18 Dec 2015 12:01:54 -0800 Subject: [PATCH 166/167] Make a new ant builder per AntTask invocation --- .../org/elasticsearch/gradle/AntTask.groovy | 28 ++++++++----------- .../precommit/LicenseHeadersTask.groovy | 9 ++---- .../precommit/ThirdPartyAuditTask.groovy | 19 ++++--------- 3 files changed, 20 insertions(+), 36 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy index 1b904150077..7cd9b5b91a7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy @@ -47,26 +47,26 @@ public class AntTask extends DefaultTask { @TaskAction final void executeTask() { - // capture the current loggers - List savedLoggers = new ArrayList<>(); - for (BuildListener l : project.ant.project.getBuildListeners()) { + AntBuilder ant = new AntBuilder() + + // remove existing loggers, we add our own + List toRemove = new ArrayList<>(); + for (BuildListener l : ant.project.getBuildListeners()) { if (l instanceof BuildLogger) { - savedLoggers.add(l); + toRemove.add(l); } } - // remove them - for (BuildLogger l : savedLoggers) { - project.ant.project.removeBuildListener(l) + for (BuildLogger l : toRemove) { + ant.project.removeBuildListener(l) } - final int outputLevel = logger.isDebugEnabled() ? Project.MSG_DEBUG : Project.MSG_INFO + final int outputLevel = logger.isDebugEnabled() ? Project.MSG_DEBUG : (logger.isInfoEnabled() ? Project.MSG_INFO : Project.MSG_WARN) final PrintStream stream = useStdout() ? 
System.out : new PrintStream(outputBuffer, true, Charset.defaultCharset().name()) BuildLogger antLogger = makeLogger(stream, outputLevel) - // now run the command with just our logger - project.ant.project.addBuildListener(antLogger) + ant.project.addBuildListener(antLogger) try { - runAnt(project.ant) + runAnt(ant) } catch (BuildException e) { // ant failed, so see if we have buffered output to emit, then rethrow the failure String buffer = outputBuffer.toString() @@ -74,12 +74,6 @@ public class AntTask extends DefaultTask { logger.error("=== Ant output ===\n${buffer}") } throw e - } finally { - project.ant.project.removeBuildListener(antLogger) - // add back the old loggers before returning - for (BuildLogger l : savedLoggers) { - project.ant.project.addBuildListener(l) - } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy index de36af886b8..39cf55c905b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy @@ -35,16 +35,13 @@ public class LicenseHeadersTask extends AntTask { LicenseHeadersTask() { description = "Checks sources for missing, incorrect, or unacceptable license headers" - - if (ant.project.taskDefinitions.contains('ratReport') == false) { - ant.project.addTaskDefinition('ratReport', Report) - ant.project.addDataTypeDefinition('substringMatcher', SubstringLicenseMatcher) - ant.project.addDataTypeDefinition('approvedLicense', SimpleLicenseFamily) - } } @Override protected void runAnt(AntBuilder ant) { + ant.project.addTaskDefinition('ratReport', Report) + ant.project.addDataTypeDefinition('substringMatcher', SubstringLicenseMatcher) + ant.project.addDataTypeDefinition('approvedLicense', SimpleLicenseFamily) // create a file for the log to go to under reports/ File reportDir = new 
File(project.buildDir, "reports/licenseHeaders") diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index 9ff348f20bb..69db1e773e8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -18,24 +18,19 @@ */ package org.elasticsearch.gradle.precommit +import org.apache.tools.ant.BuildLogger import org.apache.tools.ant.DefaultLogger +import org.apache.tools.ant.Project import org.elasticsearch.gradle.AntTask import org.gradle.api.artifacts.Configuration +import org.gradle.api.file.FileCollection -import java.nio.file.Files import java.nio.file.FileVisitResult +import java.nio.file.Files import java.nio.file.Path import java.nio.file.SimpleFileVisitor import java.nio.file.attribute.BasicFileAttributes -import org.gradle.api.DefaultTask -import org.gradle.api.artifacts.UnknownConfigurationException -import org.gradle.api.file.FileCollection -import org.gradle.api.tasks.TaskAction - -import org.apache.tools.ant.BuildLogger -import org.apache.tools.ant.Project - /** * Basic static checking to keep tabs on third party JARs */ @@ -50,10 +45,6 @@ public class ThirdPartyAuditTask extends AntTask { ThirdPartyAuditTask() { dependsOn(project.configurations.testCompile) description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors'" - - if (ant.project.taskDefinitions.contains('thirdPartyAudit') == false) { - ant.project.addTaskDefinition('thirdPartyAudit', de.thetaphi.forbiddenapis.ant.AntTask) - } } /** @@ -103,6 +94,8 @@ public class ThirdPartyAuditTask extends AntTask { @Override protected void runAnt(AntBuilder ant) { + ant.project.addTaskDefinition('thirdPartyAudit', de.thetaphi.forbiddenapis.ant.AntTask) + // we only want third party dependencies. 
FileCollection jars = project.configurations.testCompile.fileCollection({ dependency -> dependency.group.startsWith("org.elasticsearch") == false From 44edac0081edf8c889e9d7cb2ef50785056bac20 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 18 Dec 2015 12:07:30 -0800 Subject: [PATCH 167/167] use better variable name for build listener, and change access back to private for internal method of thirdPartyAudit --- .../main/groovy/org/elasticsearch/gradle/AntTask.groovy | 8 ++++---- .../gradle/precommit/ThirdPartyAuditTask.groovy | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy index 7cd9b5b91a7..b713c00ed8d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy @@ -51,13 +51,13 @@ public class AntTask extends DefaultTask { // remove existing loggers, we add our own List toRemove = new ArrayList<>(); - for (BuildListener l : ant.project.getBuildListeners()) { + for (BuildListener listener : ant.project.getBuildListeners()) { if (l instanceof BuildLogger) { - toRemove.add(l); + toRemove.add(listener); } } - for (BuildLogger l : toRemove) { - ant.project.removeBuildListener(l) + for (BuildLogger listener : toRemove) { + ant.project.removeBuildListener(listener) } final int outputLevel = logger.isDebugEnabled() ? Project.MSG_DEBUG : (logger.isInfoEnabled() ? 
Project.MSG_INFO : Project.MSG_WARN) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index 69db1e773e8..2ee4c29d614 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -167,7 +167,7 @@ public class ThirdPartyAuditTask extends AntTask { /** * check for sheisty classes: if they also exist in the extensions classloader, its jar hell with the jdk! */ - protected void checkSheistyClasses(Path root, Set excluded) { + private void checkSheistyClasses(Path root, Set excluded) { // system.parent = extensions loader. // note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!). // but groovy/gradle needs to work at all first!