From 2051817780a32c23bc7f15c8f10ef20c560e51ad Mon Sep 17 00:00:00 2001 From: gmarz Date: Mon, 8 Feb 2016 13:25:50 -0500 Subject: [PATCH 01/22] Set meta data for pipeline aggregations Closes #16484 --- .../elasticsearch/search/aggregations/AggregatorParsers.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java index f38138f2aa3..9fb9762a22d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java @@ -232,6 +232,9 @@ public class AggregatorParsers { pipelineAggregatorFactory .validate(null, factories.getAggregatorFactories(), factories.getPipelineAggregatorFactories()); } + if (metaData != null) { + pipelineAggregatorFactory.setMetaData(metaData); + } factories.addPipelineAggregator(pipelineAggregatorFactory); } } From bb61cdb330b549c23650c3722a91cc7ee7f5a478 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 28 Jan 2016 17:40:29 -0500 Subject: [PATCH 02/22] Name eclipse projects to gradle path This groups like projects together which is nice. It creates two weirdly named projects: 1. buildSrc - its still just called buildSrc and it doesn't match. I don't see why we import it into Eclipse anyway. Its groovy and easier to just edit in vim or whatever. 2. elasticsearch - this is the name of the root project. It's also not particularly useful to import into eclipse but we've always named it this way and the name ':' was even more confusing so we just kept the name. --- build.gradle | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/build.gradle b/build.gradle index c31fe88f5d2..77d29cab637 100644 --- a/build.gradle +++ b/build.gradle @@ -220,6 +220,11 @@ tasks.idea.dependsOn(buildSrcIdea) // eclipse configuration allprojects { apply plugin: 'eclipse' + // Name all the non-root projects after their path so that paths get grouped together when imported into eclipse. + if (path != ':') { + System.err.println(eclipse.project.name + ' ' + path) + eclipse.project.name = path + } plugins.withType(JavaBasePlugin) { eclipse.classpath.defaultOutputDir = new File(project.buildDir, 'eclipse') From 2adce71d329d2c281c274c3e4ec975b9e6aab449 Mon Sep 17 00:00:00 2001 From: gmarz Date: Wed, 10 Feb 2016 17:52:57 -0500 Subject: [PATCH 03/22] Fix MetricsAggregationBuilder missing the ability to set meta data --- .../metrics/MetricsAggregationBuilder.java | 17 +++- .../search/aggregations/MetaDataIT.java | 86 ++++++++++--------- 2 files changed, 63 insertions(+), 40 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregationBuilder.java index 56ae24bbd73..33c9a404a50 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregationBuilder.java @@ -23,19 +23,34 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import java.io.IOException; +import java.util.Map; /** * Base builder for metrics aggregations. 
*/ public abstract class MetricsAggregationBuilder> extends AbstractAggregationBuilder { + private Map metaData; + public MetricsAggregationBuilder(String name, String type) { super(name, type); } + /** + * Sets the meta data to be included in the metric aggregator's response + */ + public B setMetaData(Map metaData) { + this.metaData = metaData; + return (B) this; + } + @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(getName()).startObject(type); + builder.startObject(getName()); + if (this.metaData != null) { + builder.field("meta", this.metaData); + } + builder.startObject(type); internalXContent(builder, params); return builder.endObject().endObject(); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java index ee19f14293a..43ae33d6b2e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java @@ -19,56 +19,35 @@ package org.elasticsearch.search.aggregations; -import com.carrotsearch.hppc.IntIntHashMap; -import com.carrotsearch.hppc.IntIntMap; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.search.aggregations.bucket.missing.Missing; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.test.ESIntegTestCase; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.search.aggregations.AggregationBuilders.missing; +import static org.elasticsearch.search.aggregations.AggregationBuilders.*; +import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.maxBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.CoreMatchers.equalTo; -/** - * - */ + public class MetaDataIT extends ESIntegTestCase { - /** - * Making sure that if there are multiple aggregations, working on the same field, yet require different - * value source type, they can all still work. It used to fail as we used to cache the ValueSource by the - * field name. If the cached value source was of type "bytes" and another aggregation on the field required to see - * it as "numeric", it didn't work. Now we cache the Value Sources by a custom key (field name + ValueSource type) - * so there's no conflict there. 
- */ public void testMetaDataSetOnAggregationResult() throws Exception { - createIndex("idx"); IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)]; - IntIntMap values = new IntIntHashMap(); - long missingValues = 0; for (int i = 0; i < builders.length; i++) { String name = "name_" + randomIntBetween(1, 10); - if (rarely()) { - missingValues++; - builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder() - .startObject() - .field("name", name) - .endObject()); - } else { - int value = randomIntBetween(1, 10); - values.put(value, values.getOrDefault(value, 0) + 1); - builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder() - .startObject() - .field("name", name) - .field("value", value) - .endObject()); - } + builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder() + .startObject() + .field("name", name) + .field("value", randomInt()) + .endObject()); } indexRandom(true, builders); ensureSearchable(); @@ -77,7 +56,7 @@ public class MetaDataIT extends ESIntegTestCase { put("nested", "value"); }}; - Map missingValueMetaData = new HashMap() {{ + Map metaData = new HashMap() {{ put("key", "value"); put("numeric", 1.2); put("bool", true); @@ -85,7 +64,21 @@ public class MetaDataIT extends ESIntegTestCase { }}; SearchResponse response = client().prepareSearch("idx") - .addAggregation(missing("missing_values").field("value").setMetaData(missingValueMetaData)) + .addAggregation( + terms("the_terms") + .setMetaData(metaData) + .field("name") + .subAggregation( + sum("the_sum") + .setMetaData(metaData) + .field("value") + ) + ) + .addAggregation( + maxBucket("the_max_bucket") + .setMetaData(metaData) + .setBucketsPaths("the_terms>the_sum") + ) .execute().actionGet(); assertSearchResponse(response); @@ -93,11 +86,26 @@ public class MetaDataIT extends ESIntegTestCase { Aggregations aggs = response.getAggregations(); assertNotNull(aggs); - Missing missing = aggs.get("missing_values"); - assertNotNull(missing); - assertThat(missing.getDocCount(), equalTo(missingValues)); + Terms terms = aggs.get("the_terms"); + assertNotNull(terms); + assertMetaData(terms.getMetaData()); - Map returnedMetaData = missing.getMetaData(); + List buckets = terms.getBuckets(); + for (Terms.Bucket bucket : buckets) { + Aggregations subAggs = bucket.getAggregations(); + assertNotNull(subAggs); + + Sum sum = subAggs.get("the_sum"); + assertNotNull(sum); + assertMetaData(sum.getMetaData()); + } + + InternalBucketMetricValue maxBucket = aggs.get("the_max_bucket"); + assertNotNull(maxBucket); + assertMetaData(maxBucket.getMetaData()); + } + + private void assertMetaData(Map returnedMetaData) { assertNotNull(returnedMetaData); assertEquals(4, returnedMetaData.size()); assertEquals("value", returnedMetaData.get("key")); From 70db1b3c44cb72d1b3d0ff7353ef008ad7673181 Mon Sep 17 00:00:00 2001 From: Dongjoon Hyun Date: Tue, 9 Feb 2016 12:36:24 -0800 Subject: [PATCH 04/22] Fix typos in error messages and comments of dev_tools module. 
--- dev-tools/build_randomization.rb | 8 ++++---- dev-tools/prepare_release_candidate.py | 2 +- dev-tools/smoke_test_rc.py | 2 +- dev-tools/upgrade-tests.py | 2 +- dev-tools/validate-maven-repository.py | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/dev-tools/build_randomization.rb b/dev-tools/build_randomization.rb index 57559921266..4e10e5889d5 100644 --- a/dev-tools/build_randomization.rb +++ b/dev-tools/build_randomization.rb @@ -22,11 +22,11 @@ # build_randomization.rb [-d] [-l|t] # # DESCRIPTION -# This script takes the randomization choices described in RANDOM_CHOICE and generates apporpriate JAVA property file 'prop.txt' +# This script takes the randomization choices described in RANDOM_CHOICE and generates appropriate JAVA property file 'prop.txt' # This property file also contain the appropriate JDK selection, randomized. JDK randomization is based on what is available on the Jenkins tools # directory. This script is used by Jenkins test system to conduct Elasticsearch server randomization testing. # -# In hash RANDOM_CHOISES, the key of randomization hash maps to key of java property. The value of the hash describes the possible value of the randomization +# In hash RANDOM_CHOICES, the key of randomization hash maps to key of java property. The value of the hash describes the possible value of the randomization # # For example RANDOM_CHOICES = { 'es.node.mode' => {:choices => ['local', 'network'], :method => :get_random_one} } means # es.node.mode will be set to either 'local' or 'network', each with 50% of probability @@ -36,7 +36,7 @@ # # -d, --debug Increase logging verbosity for debugging purpose # -t, --test Run in test mode. The script will execute unit tests. -# -l, --local Run in local mode. In this mode, directory structure will be created under current directory to mimick +# -l, --local Run in local mode. In this mode, directory structure will be created under current directory to mimic # Jenkins' server directory layout. This mode is mainly used for development. 
require 'enumerator' require 'getoptlong' @@ -70,7 +70,7 @@ C = {:local => false, :test => false} OptionParser.new do |opts| - opts.banner = "Usage: build_ranodimzatin.rb [options]" + opts.banner = "Usage: build_randomization.rb [options]" opts.on("-d", "--debug", "Debug mode") do |d| L.level = DEBUG diff --git a/dev-tools/prepare_release_candidate.py b/dev-tools/prepare_release_candidate.py index 544ec72d876..450106a3bc9 100644 --- a/dev-tools/prepare_release_candidate.py +++ b/dev-tools/prepare_release_candidate.py @@ -260,7 +260,7 @@ if __name__ == "__main__": parser.add_argument('--check', dest='check', action='store_true', help='Checks and reports for all requirements and then exits') - # by default, we only run mvn install and dont push anything repo + # by default, we only run mvn install and don't push anything repo parser.set_defaults(deploy_sonatype=False) parser.set_defaults(deploy_s3=False) parser.set_defaults(deploy_s3_repos=False) diff --git a/dev-tools/smoke_test_rc.py b/dev-tools/smoke_test_rc.py index 0ad2d4075ec..b9049549d9a 100644 --- a/dev-tools/smoke_test_rc.py +++ b/dev-tools/smoke_test_rc.py @@ -223,7 +223,7 @@ def smoke_test_release(release, files, expected_hash, plugins): node_plugins = node['plugins'] for node_plugin in node_plugins: if not plugin_names.get(node_plugin['name'].strip(), False): - raise RuntimeError('Unexpeced plugin %s' % node_plugin['name']) + raise RuntimeError('Unexpected plugin %s' % node_plugin['name']) del plugin_names[node_plugin['name']] if plugin_names: raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys())) diff --git a/dev-tools/upgrade-tests.py b/dev-tools/upgrade-tests.py index 69bf1cff573..69e3c6a3bbd 100644 --- a/dev-tools/upgrade-tests.py +++ b/dev-tools/upgrade-tests.py @@ -37,7 +37,7 @@ except ImportError as e: '''This file executes a basic upgrade test by running a full cluster restart. -The upgrade test starts 2 or more nodes of an old elasticserach version, indexes +The upgrade test starts 2 or more nodes of an old elasticsearch version, indexes a random number of documents into the running nodes and executes a full cluster restart. After the nodes are recovered a small set of basic checks are executed to ensure all documents are still searchable and field data can be loaded etc. 
diff --git a/dev-tools/validate-maven-repository.py b/dev-tools/validate-maven-repository.py index cd457e118e9..6bf84a3a185 100644 --- a/dev-tools/validate-maven-repository.py +++ b/dev-tools/validate-maven-repository.py @@ -77,7 +77,7 @@ if __name__ == "__main__": for root, dirs, files in os.walk(localMavenRepo): for file in files: # no metadata files (they get renamed from maven-metadata-local.xml to maven-metadata.xml while deploying) - # no .properties and .repositories files (they dont get uploaded) + # no .properties and .repositories files (they don't get uploaded) if not file.startswith('maven-metadata') and not file.endswith('.properties') and not file.endswith('.repositories'): filesToCheck.append(os.path.join(root, file)) if file.endswith('.asc'): @@ -123,7 +123,7 @@ if __name__ == "__main__": print if len(errors) != 0: - print 'The following errors occured (%s out of %s files)' % (len(errors), len(filesToCheck)) + print 'The following errors occurred (%s out of %s files)' % (len(errors), len(filesToCheck)) print for error in errors: print error From 7835525f454bc08038ffe50bda4866e076bb89d6 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 11 Feb 2016 12:02:21 +0100 Subject: [PATCH 05/22] Move IndicesQueryCache and IndicesRequestCache into IndicesService this is a minor cleanup that detaches `IndicesRequestCache` and `IndicesQueryCache` from guice and moves it into `IndicesService`. It also decouples the `IndexShard` and `IndexService` from these caches which are unnecessary dependencies. --- .../stats/TransportClusterStatsAction.java | 2 +- .../TransportClearIndicesCacheAction.java | 9 ++---- .../admin/indices/stats/CommonStats.java | 9 ++++-- .../stats/TransportIndicesStatsAction.java | 2 +- .../index/CompositeIndexEventListener.java | 12 ++++++++ .../org/elasticsearch/index/IndexModule.java | 4 +-- .../org/elasticsearch/index/IndexService.java | 3 +- .../index/NodeServicesProvider.java | 9 +----- .../index/shard/IndexEventListener.java | 8 ++++++ .../elasticsearch/index/shard/IndexShard.java | 8 ------ .../elasticsearch/indices/IndicesModule.java | 4 --- .../elasticsearch/indices/IndicesService.java | 27 ++++++++++++++++-- .../cache/query/IndicesQueryCache.java | 1 - .../cache/request/IndicesRequestCache.java | 28 +++++++------------ .../java/org/elasticsearch/node/Node.java | 3 -- .../elasticsearch/search/SearchService.java | 5 ++-- .../elasticsearch/index/IndexModuleTests.java | 24 ++++++++-------- .../index/shard/IndexShardTests.java | 2 +- .../search/MockSearchService.java | 5 ++-- 19 files changed, 87 insertions(+), 78 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 3fc2f4b631e..0c883ccb377 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -105,7 +105,7 @@ public class TransportClusterStatsAction extends TransportNodesAction { private final IndicesService indicesService; - private final IndicesRequestCache indicesRequestCache; @Inject public TransportClearIndicesCacheAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, IndicesService indicesService, - IndicesRequestCache indicesQueryCache, ActionFilters actionFilters, + TransportService transportService, 
IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT); this.indicesService = indicesService; - this.indicesRequestCache = indicesQueryCache; } @Override @@ -101,7 +98,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc } if (request.requestCache()) { clearedAtLeastOne = true; - indicesRequestCache.clear(shard); + indicesService.getIndicesRequestCache().clear(shard); } if (request.recycler()) { logger.debug("Clear CacheRecycler on index [{}]", service.index()); @@ -117,7 +114,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc } else { service.cache().clear("api"); service.fieldData().clear(); - indicesRequestCache.clear(shard); + indicesService.getIndicesRequestCache().clear(shard); } } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 85644e8523e..47fb8d8356a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.cache.request.RequestCacheStats; import org.elasticsearch.index.engine.SegmentsStats; @@ -40,10 +41,13 @@ import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.index.suggest.stats.SuggestStats; import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.index.warmer.WarmerStats; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.search.suggest.completion.CompletionStats; import java.io.IOException; @@ -122,7 +126,8 @@ public class CommonStats implements Streamable, ToXContent { } - public CommonStats(IndexShard indexShard, CommonStatsFlags flags) { + public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) { + CommonStatsFlags.Flag[] setFlags = flags.getFlags(); for (CommonStatsFlags.Flag flag : setFlags) { @@ -155,7 +160,7 @@ public class CommonStats implements Streamable, ToXContent { warmer = indexShard.warmerStats(); break; case QueryCache: - queryCache = indexShard.queryCacheStats(); + queryCache = indicesQueryCache.getStats(indexShard.shardId()); break; case FieldData: fieldData = indexShard.fieldDataStats(flags.fieldDataFields()); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index d5de67da478..4bed5f918a4 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -162,6 +162,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction< flags.set(CommonStatsFlags.Flag.Recovery); } - return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indexShard, flags), indexShard.commitStats()); + return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats()); } } diff --git a/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 1ee1f1cc4a9..acc0d3f8370 100644 --- a/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -258,4 +258,16 @@ final class CompositeIndexEventListener implements IndexEventListener { } } } + + @Override + public void onStoreClosed(ShardId shardId) { + for (IndexEventListener listener : listeners) { + try { + listener.onStoreClosed(shardId); + } catch (Throwable t) { + logger.warn("failed to invoke on store closed", t); + throw t; + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/index/IndexModule.java b/core/src/main/java/org/elasticsearch/index/IndexModule.java index b45f29f752a..f23441fa908 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/core/src/main/java/org/elasticsearch/index/IndexModule.java @@ -241,7 +241,7 @@ public final class IndexModule { IndexSearcherWrapper newWrapper(final IndexService indexService); } - public IndexService newIndexService(NodeEnvironment environment, IndexService.ShardStoreDeleter shardStoreDeleter, NodeServicesProvider servicesProvider, MapperRegistry mapperRegistry, IndicesFieldDataCache indicesFieldDataCache, + public IndexService newIndexService(NodeEnvironment environment, IndexService.ShardStoreDeleter shardStoreDeleter, NodeServicesProvider servicesProvider, IndicesQueryCache indicesQueryCache, MapperRegistry mapperRegistry, IndicesFieldDataCache indicesFieldDataCache, IndexingOperationListener... listeners) throws IOException { IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null ? 
(shard) -> null : indexSearcherWrapper.get(); IndexEventListener eventListener = freeze(); @@ -263,7 +263,7 @@ public final class IndexModule { indexSettings.getScopedSettings().addSettingsUpdateConsumer(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, store::setMaxRate); final String queryCacheType = indexSettings.getValue(INDEX_QUERY_CACHE_TYPE_SETTING); final BiFunction queryCacheProvider = queryCaches.get(queryCacheType); - final QueryCache queryCache = queryCacheProvider.apply(indexSettings, servicesProvider.getIndicesQueryCache()); + final QueryCache queryCache = queryCacheProvider.apply(indexSettings, indicesQueryCache); return new IndexService(indexSettings, environment, new SimilarityService(indexSettings, similarities), shardStoreDeleter, analysisRegistry, engineFactory.get(), servicesProvider, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, indicesFieldDataCache, listeners); } diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 9ffb6c4d56c..ffac5d2a21b 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -321,8 +321,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC warmer.warm(searcher, shard, IndexService.this.indexSettings, toLevel); } }; - - store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId))); + store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId))); if (useShadowEngine(primary, indexSettings)) { indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog, engineWarmer); // no indexing listeners - shadow engines don't index } else { diff --git a/core/src/main/java/org/elasticsearch/index/NodeServicesProvider.java b/core/src/main/java/org/elasticsearch/index/NodeServicesProvider.java index 4bb25214708..fa245352ae7 100644 --- a/core/src/main/java/org/elasticsearch/index/NodeServicesProvider.java +++ b/core/src/main/java/org/elasticsearch/index/NodeServicesProvider.java @@ -23,7 +23,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; @@ -36,7 +35,6 @@ import org.elasticsearch.threadpool.ThreadPool; public final class NodeServicesProvider { private final ThreadPool threadPool; - private final IndicesQueryCache indicesQueryCache; private final BigArrays bigArrays; private final Client client; private final IndicesQueriesRegistry indicesQueriesRegistry; @@ -44,9 +42,8 @@ public final class NodeServicesProvider { private final CircuitBreakerService circuitBreakerService; @Inject - public NodeServicesProvider(ThreadPool threadPool, IndicesQueryCache indicesQueryCache, BigArrays bigArrays, Client client, 
ScriptService scriptService, IndicesQueriesRegistry indicesQueriesRegistry, CircuitBreakerService circuitBreakerService) { + public NodeServicesProvider(ThreadPool threadPool, BigArrays bigArrays, Client client, ScriptService scriptService, IndicesQueriesRegistry indicesQueriesRegistry, CircuitBreakerService circuitBreakerService) { this.threadPool = threadPool; - this.indicesQueryCache = indicesQueryCache; this.bigArrays = bigArrays; this.client = client; this.indicesQueriesRegistry = indicesQueriesRegistry; @@ -58,10 +55,6 @@ public final class NodeServicesProvider { return threadPool; } - public IndicesQueryCache getIndicesQueryCache() { - return indicesQueryCache; - } - public BigArrays getBigArrays() { return bigArrays; } public Client getClient() { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java b/core/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java index 8d3523a18b1..f5c6dca7d2f 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java @@ -177,4 +177,12 @@ public interface IndexEventListener { */ default void beforeIndexAddedToCluster(Index index, Settings indexSettings) { } + + /** + * Called when the given shards store is closed. The store is closed once all resource have been released on the store. + * This implies that all index readers are closed and no recoveries are running. + * + * @param shardId the shard ID the store belongs to + */ + default void onStoreClosed(ShardId shardId) {} } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index f2f1add2594..0f47eec6b25 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -58,7 +58,6 @@ import org.elasticsearch.index.SearchSlowLog; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.cache.bitset.ShardBitsetFilterCache; -import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.cache.request.ShardRequestCache; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.CommitStats; @@ -105,7 +104,6 @@ import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.index.warmer.ShardIndexWarmerService; import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.indices.IndexingMemoryController; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.percolator.PercolatorService; @@ -154,7 +152,6 @@ public class IndexShard extends AbstractIndexShardComponent { private final SimilarityService similarityService; private final EngineConfig engineConfig; private final TranslogConfig translogConfig; - private final IndicesQueryCache indicesQueryCache; private final IndexEventListener indexEventListener; private final IndexSettings idxSettings; @@ -227,7 +224,6 @@ public class IndexShard extends AbstractIndexShardComponent { this.getService = new ShardGetService(indexSettings, this, mapperService); this.searchService = new ShardSearchStats(slowLog); this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings); - this.indicesQueryCache = 
provider.getIndicesQueryCache(); this.shardQueryCache = new ShardRequestCache(shardId, indexSettings); this.shardFieldData = new ShardFieldData(); this.indexFieldDataService = indexFieldDataService; @@ -652,10 +648,6 @@ public class IndexShard extends AbstractIndexShardComponent { return shardWarmerService.stats(); } - public QueryCacheStats queryCacheStats() { - return indicesQueryCache.getStats(shardId); - } - public FieldDataStats fieldDataStats(String... fields) { return shardFieldData.stats(fields); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index f14d708c0f9..b94ef19ec23 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -55,8 +55,6 @@ import org.elasticsearch.index.mapper.internal.VersionFieldMapper; import org.elasticsearch.index.mapper.ip.IpFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.percolator.PercolatorFieldMapper; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.mapper.MapperRegistry; @@ -160,8 +158,6 @@ public class IndicesModule extends AbstractModule { bind(IndicesStore.class).asEagerSingleton(); bind(IndicesClusterStateService.class).asEagerSingleton(); bind(SyncedFlushService.class).asEagerSingleton(); - bind(IndicesQueryCache.class).asEagerSingleton(); - bind(IndicesRequestCache.class).asEagerSingleton(); bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton(); bind(IndicesTTLService.class).asEagerSingleton(); bind(UpdateHelper.class).asEagerSingleton(); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index f092789ae30..7b2bc89e646 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -71,6 +71,8 @@ import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry; @@ -124,6 +126,8 @@ public class IndicesService extends AbstractLifecycleComponent i private final MapperRegistry mapperRegistry; private final IndexingMemoryController indexingMemoryController; private final TimeValue cleanInterval; + private final IndicesRequestCache indicesRequestCache; + private final IndicesQueryCache indicesQueryCache; @Override protected void doStart() { @@ -146,6 +150,8 @@ public class IndicesService extends AbstractLifecycleComponent i this.indicesQueriesRegistry = indicesQueriesRegistry; this.clusterService = clusterService; this.indexNameExpressionResolver = indexNameExpressionResolver; + this.indicesRequestCache = new IndicesRequestCache(settings, threadPool); + this.indicesQueryCache = new 
IndicesQueryCache(settings); this.mapperRegistry = mapperRegistry; clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType); clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle); @@ -196,7 +202,7 @@ public class IndicesService extends AbstractLifecycleComponent i @Override protected void doClose() { - IOUtils.closeWhileHandlingException(analysisRegistry, indexingMemoryController, indicesFieldDataCache, fieldDataCacheCleaner); + IOUtils.closeWhileHandlingException(analysisRegistry, indexingMemoryController, indicesFieldDataCache, fieldDataCacheCleaner, indicesRequestCache, indicesQueryCache); } /** @@ -247,7 +253,7 @@ public class IndicesService extends AbstractLifecycleComponent i if (indexShard.routingEntry() == null) { continue; } - IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indexShard, flags), indexShard.commitStats()) }); + IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesQueryCache, indexShard, flags), indexShard.commitStats()) }); if (!statsByShard.containsKey(indexService.index())) { statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats)); } else { @@ -348,10 +354,17 @@ public class IndicesService extends AbstractLifecycleComponent i for (IndexEventListener listener : builtInListeners) { indexModule.addIndexEventListener(listener); } + final IndexEventListener onStoreClose = new IndexEventListener() { + @Override + public void onStoreClosed(ShardId shardId) { + indicesQueryCache.onClose(shardId); + } + }; + indexModule.addIndexEventListener(onStoreClose); indexModule.addIndexEventListener(oldShardsStats); final IndexEventListener listener = indexModule.freeze(); listener.beforeIndexCreated(index, idxSettings.getSettings()); - final IndexService indexService = indexModule.newIndexService(nodeEnv, this, nodeServicesProvider, mapperRegistry, indicesFieldDataCache, indexingMemoryController); + final IndexService indexService = indexModule.newIndexService(nodeEnv, this, nodeServicesProvider, indicesQueryCache, mapperRegistry, indicesFieldDataCache, indexingMemoryController); boolean success = false; try { assert indexService.getIndexEventListener() == listener; @@ -420,6 +433,14 @@ public class IndicesService extends AbstractLifecycleComponent i return circuitBreakerService; } + public IndicesRequestCache getIndicesRequestCache() { + return indicesRequestCache; + } + + public IndicesQueryCache getIndicesQueryCache() { + return indicesQueryCache; + } + static class OldShardsStats implements IndexEventListener { final SearchStats searchStats = new SearchStats(); diff --git a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java index 58c2cd5a953..718f4db9c4e 100644 --- a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java +++ b/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java @@ -64,7 +64,6 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache, // See onDocIdSetEviction for more info private final Map stats2 = new IdentityHashMap<>(); - 
@Inject public IndicesQueryCache(Settings settings) { super(settings); final ByteSizeValue size = INDICES_CACHE_QUERY_SIZE_SETTING.get(settings); diff --git a/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java index 36ac787855b..d58c1c13994 100644 --- a/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java +++ b/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java @@ -45,6 +45,8 @@ import org.elasticsearch.common.unit.MemorySizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.search.internal.SearchContext; @@ -53,6 +55,7 @@ import org.elasticsearch.search.query.QueryPhase; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.threadpool.ThreadPool; +import java.io.Closeable; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; @@ -74,7 +77,7 @@ import java.util.concurrent.TimeUnit; * There are still several TODOs left in this class, some easily addressable, some more complex, but the support * is functional. */ -public class IndicesRequestCache extends AbstractComponent implements RemovalListener { +public class IndicesRequestCache extends AbstractComponent implements RemovalListener, Closeable { /** * A setting to enable or disable request caching on an index level. Its dynamic by default @@ -89,7 +92,6 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis private static final Set CACHEABLE_SEARCH_TYPES = EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH); private final ThreadPool threadPool; - private final ClusterService clusterService; private final TimeValue cleanInterval; private final Reaper reaper; @@ -104,18 +106,13 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis private volatile Cache cache; - @Inject - public IndicesRequestCache(Settings settings, ClusterService clusterService, ThreadPool threadPool) { + public IndicesRequestCache(Settings settings, ThreadPool threadPool) { super(settings); - this.clusterService = clusterService; this.threadPool = threadPool; this.cleanInterval = INDICES_CACHE_REQUEST_CLEAN_INTERVAL.get(settings); - this.size = INDICES_CACHE_QUERY_SIZE.get(settings); - this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? 
INDICES_CACHE_QUERY_EXPIRE.get(settings) : null; buildCache(); - this.reaper = new Reaper(); threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper); } @@ -123,10 +120,8 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis private void buildCache() { long sizeInBytes = size.bytes(); - CacheBuilder cacheBuilder = CacheBuilder.builder() .setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this); - // cacheBuilder.concurrencyLevel(concurrencyLevel); if (expire != null) { cacheBuilder.setExpireAfterAccess(TimeUnit.MILLISECONDS.toNanos(expire.millis())); @@ -135,6 +130,7 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis cache = cacheBuilder.build(); } + @Override public void close() { reaper.close(); cache.invalidateAll(); @@ -174,21 +170,17 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis if (!CACHEABLE_SEARCH_TYPES.contains(context.searchType())) { return false; } - - IndexMetaData index = clusterService.state().getMetaData().index(request.index()); - if (index == null) { // in case we didn't yet have the cluster state, or it just got deleted - return false; - } + IndexSettings settings = context.indexShard().getIndexSettings(); // if not explicitly set in the request, use the index setting, if not, use the request if (request.requestCache() == null) { - if (INDEX_CACHE_REQUEST_ENABLED_SETTING.get(index.getSettings()) == false) { + if (settings.getValue(INDEX_CACHE_REQUEST_ENABLED_SETTING) == false) { return false; } - } else if (!request.requestCache()) { + } else if (request.requestCache() == false) { return false; } // if the reader is not a directory reader, we can't get the version from it - if (!(context.searcher().getIndexReader() instanceof DirectoryReader)) { + if ((context.searcher().getIndexReader() instanceof DirectoryReader) == false) { return false; } // if now in millis is used (or in the future, a more generic "isDeterministic" flag diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 88d21764abb..ee523e975a1 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -70,9 +70,7 @@ import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.CircuitBreakerModule; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.cluster.IndicesClusterStateService; -import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.ttl.IndicesTTLService; import org.elasticsearch.monitor.MonitorService; @@ -391,7 +389,6 @@ public class Node implements Closeable { toClose.add(injector.getInstance(IndicesTTLService.class)); toClose.add(injector.getInstance(IndicesService.class)); // close filter/fielddata caches after indices - toClose.add(injector.getInstance(IndicesQueryCache.class)); toClose.add(injector.getInstance(IndicesStore.class)); toClose.add(() ->stopWatch.stop().start("routing")); toClose.add(injector.getInstance(RoutingService.class)); diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index e0b30a2e346..4afef7117f5 100644 --- 
a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -149,8 +149,7 @@ public class SearchService extends AbstractLifecycleComponent imp @Inject public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, - ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, - IndicesRequestCache indicesQueryCache) { + ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase) { super(settings); this.parseFieldMatcher = new ParseFieldMatcher(settings); this.threadPool = threadPool; @@ -162,7 +161,7 @@ public class SearchService extends AbstractLifecycleComponent imp this.dfsPhase = dfsPhase; this.queryPhase = queryPhase; this.fetchPhase = fetchPhase; - this.indicesQueryCache = indicesQueryCache; + this.indicesQueryCache = indicesService.getIndicesRequestCache(); TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings); this.defaultKeepAlive = DEFAULT_KEEPALIVE_SETTING.get(settings).millis(); diff --git a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 0b0691bc588..01cd056ed8e 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -88,6 +88,8 @@ public class IndexModuleTests extends ESTestCase { private Environment environment; private NodeEnvironment nodeEnvironment; private NodeServicesProvider nodeServicesProvider; + private IndicesQueryCache indicesQueryCache = new IndicesQueryCache(settings); + private IndexService.ShardStoreDeleter deleter = new IndexService.ShardStoreDeleter() { @Override public void deleteShardStore(String reason, ShardLock lock, IndexSettings indexSettings) throws IOException { @@ -103,7 +105,6 @@ public class IndexModuleTests extends ESTestCase { static NodeServicesProvider newNodeServiceProvider(Settings settings, Environment environment, Client client, ScriptEngineService... 
scriptEngineServices) throws IOException { // TODO this can be used in other place too - lets first refactor the IndicesQueriesRegistry ThreadPool threadPool = new ThreadPool("test"); - IndicesQueryCache indicesQueryCache = new IndicesQueryCache(settings); CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); PageCacheRecycler recycler = new PageCacheRecycler(settings, threadPool); BigArrays bigArrays = new BigArrays(recycler, circuitBreakerService); @@ -114,7 +115,7 @@ public class IndexModuleTests extends ESTestCase { ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry); ScriptService scriptService = new ScriptService(settings, environment, scriptEngines, new ResourceWatcherService(settings, threadPool), scriptEngineRegistry, scriptContextRegistry, scriptSettings); IndicesQueriesRegistry indicesQueriesRegistry = new IndicesQueriesRegistry(settings, emptyMap()); - return new NodeServicesProvider(threadPool, indicesQueryCache, bigArrays, client, scriptService, indicesQueriesRegistry, circuitBreakerService); + return new NodeServicesProvider(threadPool, bigArrays, client, scriptService, indicesQueriesRegistry, circuitBreakerService); } @Override @@ -143,7 +144,7 @@ public class IndexModuleTests extends ESTestCase { IndexModule module = new IndexModule(indexSettings, null, new AnalysisRegistry(null, environment)); module.setSearcherWrapper((s) -> new Wrapper()); module.engineFactory.set(new MockEngineFactory(AssertingDirectoryReader.class)); - IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry, new IndicesFieldDataCache(settings, listener)); + IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry, new IndicesFieldDataCache(settings, listener)); assertTrue(indexService.getSearcherWrapper() instanceof Wrapper); assertSame(indexService.getEngineFactory(), module.engineFactory.get()); indexService.close("simon says", false); @@ -158,10 +159,9 @@ public class IndexModuleTests extends ESTestCase { .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "foo_store") .build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings); - final Index index = indexSettings.getIndex(); IndexModule module = new IndexModule(indexSettings, null, new AnalysisRegistry(null, environment)); module.addIndexStore("foo_store", FooStore::new); - IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry, new IndicesFieldDataCache(settings, listener)); + IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry, new IndicesFieldDataCache(settings, listener)); assertTrue(indexService.getIndexStore() instanceof FooStore); try { module.addIndexStore("foo_store", FooStore::new); @@ -184,7 +184,7 @@ public class IndexModuleTests extends ESTestCase { IndexModule module = new IndexModule(indexSettings, null, new AnalysisRegistry(null, environment)); Consumer listener = (s) -> {}; module.addIndexEventListener(eventListener); - IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry, + IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry, new IndicesFieldDataCache(settings, this.listener)); IndexSettings x = 
indexService.getIndexSettings(); assertEquals(x.getSettings().getAsMap(), indexSettings.getSettings().getAsMap()); @@ -209,7 +209,7 @@ public class IndexModuleTests extends ESTestCase { } - IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry, + IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry, new IndicesFieldDataCache(settings, listener)); assertSame(booleanSetting, indexService.getIndexSettings().getScopedSettings().get(booleanSetting.getKey())); @@ -236,7 +236,7 @@ public class IndexModuleTests extends ESTestCase { } }); - IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry, + IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry, new IndicesFieldDataCache(settings, listener)); SimilarityService similarityService = indexService.similarityService(); assertNotNull(similarityService.getSimilarity("my_similarity")); @@ -254,7 +254,7 @@ public class IndexModuleTests extends ESTestCase { .build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), null, new AnalysisRegistry(null, environment)); try { - module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry, + module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry, new IndicesFieldDataCache(settings, listener)); } catch (IllegalArgumentException ex) { assertEquals("Unknown Similarity type [test_similarity] for [my_similarity]", ex.getMessage()); @@ -269,7 +269,7 @@ public class IndexModuleTests extends ESTestCase { .build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), null, new AnalysisRegistry(null, environment)); try { - module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry, + module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry, new IndicesFieldDataCache(settings, listener)); } catch (IllegalArgumentException ex) { assertEquals("Similarity [my_similarity] must have an associated type", ex.getMessage()); @@ -317,7 +317,7 @@ public class IndexModuleTests extends ESTestCase { assertEquals(e.getMessage(), "Can't register the same [query_cache] more than once for [custom]"); } - IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry, + IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry, new IndicesFieldDataCache(settings, listener)); assertTrue(indexService.cache().query() instanceof CustomQueryCache); indexService.close("simon says", false); @@ -328,7 +328,7 @@ public class IndexModuleTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), null, new AnalysisRegistry(null, environment)); - IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry, + IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry, new 
IndicesFieldDataCache(settings, listener)); assertTrue(indexService.cache().query() instanceof IndexQueryCache); indexService.close("simon says", false); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index e5f0ade2d50..b5a38553a8c 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -564,7 +564,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService("test"); IndexShard shard = test.getShardOrNull(0); - ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(shard, new CommonStatsFlags()), shard.commitStats()); + ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), shard, new CommonStatsFlags()), shard.commitStats()); assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath()); assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath()); assertEquals(shard.shardPath().isCustomDataPath(), stats.isCustomDataPath()); diff --git a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java index 7cc583273b9..e9d78e4e58a 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java +++ b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.dfs.DfsPhase; @@ -68,9 +67,9 @@ public class MockSearchService extends SearchService { @Inject public MockSearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, - DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) { + DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase) { super(settings, clusterSettings, clusterService, indicesService, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase, - queryPhase, fetchPhase, indicesQueryCache); + queryPhase, fetchPhase); } @Override From 37b0fc4f1080f7b891dd15f494279c0b2ac328a1 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 29 Jan 2016 18:05:18 +0100 Subject: [PATCH 06/22] Migrate AWS settings to new settings infrastructure Reintroducing commit fb7723c but now deals with setting names conflicts Also adds java documentation for each setting Closes #16293. 
Related to https://github.com/elastic/elasticsearch/pull/16477#discussion_r52469084 --- .../common/settings/SettingsModule.java | 12 + .../cloud/aws/AwsEc2Service.java | 197 ++++++++++-- .../cloud/aws/AwsEc2ServiceImpl.java | 57 +--- .../aws/node/Ec2CustomNodeAttributes.java | 3 +- .../ec2/AwsEc2UnicastHostsProvider.java | 33 +- .../discovery/ec2/Ec2DiscoveryPlugin.java | 62 +++- .../cloud/aws/AWSSignersTests.java | 13 + .../cloud/aws/AbstractAwsTestCase.java | 11 +- .../ec2/Ec2DiscoverySettingsTests.java | 70 ++++ .../discovery/ec2/Ec2DiscoveryTests.java | 17 +- .../ec2/Ec2DiscoveryUpdateSettingsTests.java | 3 - .../plugin-descriptor.properties | 48 +++ .../elasticsearch/cloud/aws/AwsS3Service.java | 163 +++++++--- .../cloud/aws/InternalAwsS3Service.java | 86 ++--- .../repository/s3/S3RepositoryPlugin.java | 84 ++++- .../repositories/s3/S3Repository.java | 228 ++++++++++--- .../cloud/aws/AWSSignersTests.java | 13 + .../cloud/aws/AbstractAwsTestCase.java | 13 +- .../cloud/aws/RepositoryS3SettingsTests.java | 302 ++++++++++++++++++ .../cloud/aws/TestAwsS3Service.java | 14 +- .../s3/AbstractS3SnapshotRestoreTest.java | 111 ++++--- 21 files changed, 1196 insertions(+), 344 deletions(-) create mode 100644 plugins/repository-s3/generated-resources/plugin-descriptor.properties create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/RepositoryS3SettingsTests.java diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 23c67609f1b..d6858834318 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -108,6 +108,18 @@ public class SettingsModule extends AbstractModule { } } + /** + * Check if a setting has already been registered + */ + public boolean exists(Setting setting) { + switch (setting.getScope()) { + case CLUSTER: + return clusterSettings.containsKey(setting.getKey()); + case INDEX: + return indexSettings.containsKey(setting.getKey()); + } + throw new IllegalArgumentException("setting scope is unknown. 
This should never happen!"); + } private void validateTribeSettings(Settings settings, ClusterSettings clusterSettings) { Map groups = settings.filter(TRIBE_CLIENT_NODE_SETTINGS_PREDICATE).getGroups("tribe.", true); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java index e97dd94137b..a90d3573468 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java @@ -19,42 +19,179 @@ package org.elasticsearch.cloud.aws; +import com.amazonaws.Protocol; import com.amazonaws.services.ec2.AmazonEC2; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.component.LifecycleComponent; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.function.Function; -public interface AwsEc2Service extends LifecycleComponent { - final class CLOUD_AWS { - public static final String KEY = "cloud.aws.access_key"; - public static final String SECRET = "cloud.aws.secret_key"; - public static final String PROTOCOL = "cloud.aws.protocol"; - public static final String PROXY_HOST = "cloud.aws.proxy.host"; - public static final String PROXY_PORT = "cloud.aws.proxy.port"; - public static final String PROXY_USERNAME = "cloud.aws.proxy.username"; - public static final String PROXY_PASSWORD = "cloud.aws.proxy.password"; - public static final String SIGNER = "cloud.aws.signer"; - public static final String REGION = "cloud.aws.region"; +public interface AwsEc2Service { + Setting AUTO_ATTRIBUTE_SETTING = Setting.boolSetting("cloud.node.auto_attributes", false, false, Setting.Scope.CLUSTER); + + // Global AWS settings (shared between discovery-ec2 and repository-s3) + // Each setting starting with `cloud.aws` also exists in repository-s3 project. Don't forget to update + // the code there if you change anything here. + /** + * cloud.aws.access_key: AWS Access key. Shared with repository-s3 plugin + */ + Setting KEY_SETTING = Setting.simpleString("cloud.aws.access_key", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.secret_key: AWS Secret key. Shared with repository-s3 plugin + */ + Setting SECRET_SETTING = Setting.simpleString("cloud.aws.secret_key", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.protocol: Protocol for AWS API: http or https. Defaults to https. Shared with repository-s3 plugin + */ + Setting PROTOCOL_SETTING = new Setting<>("cloud.aws.protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), + false, Setting.Scope.CLUSTER); + /** + * cloud.aws.proxy.host: In case of proxy, define its hostname/IP. Shared with repository-s3 plugin + */ + Setting PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.proxy.port: In case of proxy, define its port. Defaults to 80. Shared with repository-s3 plugin + */ + Setting PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, false, Setting.Scope.CLUSTER); + /** + * cloud.aws.proxy.username: In case of proxy with auth, define the username. 
Shared with repository-s3 plugin + */ + Setting PROXY_USERNAME_SETTING = Setting.simpleString("cloud.aws.proxy.username", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.proxy.password: In case of proxy with auth, define the password. Shared with repository-s3 plugin + */ + Setting PROXY_PASSWORD_SETTING = Setting.simpleString("cloud.aws.proxy.password", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.signer: If you are using an old AWS API version, you can define a Signer. Shared with repository-s3 plugin + */ + Setting SIGNER_SETTING = Setting.simpleString("cloud.aws.signer", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.region: Region. Shared with repository-s3 plugin + */ + Setting REGION_SETTING = new Setting<>("cloud.aws.region", "", s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + + /** + * Defines specific ec2 settings starting with cloud.aws.ec2. + */ + interface CLOUD_EC2 { + /** + * cloud.aws.ec2.access_key: AWS Access key specific for EC2 API calls. Defaults to cloud.aws.access_key. + * @see AwsEc2Service#KEY_SETTING + */ + Setting KEY_SETTING = new Setting<>("cloud.aws.ec2.access_key", AwsEc2Service.KEY_SETTING, Function.identity(), false, + Setting.Scope.CLUSTER); + /** + * cloud.aws.ec2.secret_key: AWS Secret key specific for EC2 API calls. Defaults to cloud.aws.secret_key. + * @see AwsEc2Service#SECRET_SETTING + */ + Setting SECRET_SETTING = new Setting<>("cloud.aws.ec2.secret_key", AwsEc2Service.SECRET_SETTING, Function.identity(), false, + Setting.Scope.CLUSTER); + /** + * cloud.aws.ec2.protocol: Protocol for AWS API specific for EC2 API calls: http or https. Defaults to cloud.aws.protocol. + * @see AwsEc2Service#PROTOCOL_SETTING + */ + Setting PROTOCOL_SETTING = new Setting<>("cloud.aws.ec2.protocol", AwsEc2Service.PROTOCOL_SETTING, + s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, Setting.Scope.CLUSTER); + /** + * cloud.aws.ec2.proxy.host: In case of proxy, define its hostname/IP specific for EC2 API calls. Defaults to cloud.aws.proxy.host. + * @see AwsEc2Service#PROXY_HOST_SETTING + */ + Setting PROXY_HOST_SETTING = new Setting<>("cloud.aws.ec2.proxy.host", AwsEc2Service.PROXY_HOST_SETTING, + Function.identity(), false, Setting.Scope.CLUSTER); + /** + * cloud.aws.ec2.proxy.port: In case of proxy, define its port specific for EC2 API calls. Defaults to cloud.aws.proxy.port. + * @see AwsEc2Service#PROXY_PORT_SETTING + */ + Setting PROXY_PORT_SETTING = new Setting<>("cloud.aws.ec2.proxy.port", AwsEc2Service.PROXY_PORT_SETTING, + s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.ec2.proxy.port"), false, Setting.Scope.CLUSTER); + /** + * cloud.aws.ec2.proxy.username: In case of proxy with auth, define the username specific for EC2 API calls. + * Defaults to cloud.aws.proxy.username. + * @see AwsEc2Service#PROXY_USERNAME_SETTING + */ + Setting PROXY_USERNAME_SETTING = new Setting<>("cloud.aws.ec2.proxy.username", AwsEc2Service.PROXY_USERNAME_SETTING, + Function.identity(), false, Setting.Scope.CLUSTER); + /** + * cloud.aws.ec2.proxy.password: In case of proxy with auth, define the password specific for EC2 API calls. + * Defaults to cloud.aws.proxy.password. + * @see AwsEc2Service#PROXY_PASSWORD_SETTING + */ + Setting PROXY_PASSWORD_SETTING = new Setting<>("cloud.aws.ec2.proxy.password", AwsEc2Service.PROXY_PASSWORD_SETTING, + Function.identity(), false, Setting.Scope.CLUSTER); + /** + * cloud.aws.ec2.signer: If you are using an old AWS API version, you can define a Signer. Specific for EC2 API calls. + * Defaults to cloud.aws.signer. 
+ * @see AwsEc2Service#SIGNER_SETTING + */ + Setting SIGNER_SETTING = new Setting<>("cloud.aws.ec2.signer", AwsEc2Service.SIGNER_SETTING, Function.identity(), + false, Setting.Scope.CLUSTER); + /** + * cloud.aws.ec2.region: Region specific for EC2 API calls. Defaults to cloud.aws.region. + * @see AwsEc2Service#REGION_SETTING + */ + Setting REGION_SETTING = new Setting<>("cloud.aws.ec2.region", AwsEc2Service.REGION_SETTING, + s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + /** + * cloud.aws.ec2.endpoint: Endpoint. If not set, endpoint will be guessed based on region setting. + */ + Setting ENDPOINT_SETTING = Setting.simpleString("cloud.aws.ec2.endpoint", false, Setting.Scope.CLUSTER); } - final class CLOUD_EC2 { - public static final String KEY = "cloud.aws.ec2.access_key"; - public static final String SECRET = "cloud.aws.ec2.secret_key"; - public static final String PROTOCOL = "cloud.aws.ec2.protocol"; - public static final String PROXY_HOST = "cloud.aws.ec2.proxy.host"; - public static final String PROXY_PORT = "cloud.aws.ec2.proxy.port"; - public static final String PROXY_USERNAME = "cloud.aws.ec2.proxy.username"; - public static final String PROXY_PASSWORD = "cloud.aws.ec2.proxy.password"; - public static final String SIGNER = "cloud.aws.ec2.signer"; - public static final String ENDPOINT = "cloud.aws.ec2.endpoint"; - } + /** + * Defines discovery settings for ec2. Starting with discovery.ec2. + */ + interface DISCOVERY_EC2 { + enum HostType { + PRIVATE_IP, + PUBLIC_IP, + PRIVATE_DNS, + PUBLIC_DNS + } - final class DISCOVERY_EC2 { - public static final String HOST_TYPE = "discovery.ec2.host_type"; - public static final String ANY_GROUP = "discovery.ec2.any_group"; - public static final String GROUPS = "discovery.ec2.groups"; - public static final String TAG_PREFIX = "discovery.ec2.tag."; - public static final String AVAILABILITY_ZONES = "discovery.ec2.availability_zones"; - public static final String NODE_CACHE_TIME = "discovery.ec2.node_cache_time"; + /** + * discovery.ec2.host_type: The type of host type to use to communicate with other instances. + * Can be one of private_ip, public_ip, private_dns, public_dns. Defaults to private_ip. + */ + Setting HOST_TYPE_SETTING = + new Setting<>("discovery.ec2.host_type", HostType.PRIVATE_IP.name(), s -> HostType.valueOf(s.toUpperCase(Locale.ROOT)), false, + Setting.Scope.CLUSTER); + /** + * discovery.ec2.any_group: If set to false, will require all security groups to be present for the instance to be used for the + * discovery. Defaults to true. + */ + Setting ANY_GROUP_SETTING = + Setting.boolSetting("discovery.ec2.any_group", true, false, Setting.Scope.CLUSTER); + /** + * discovery.ec2.groups: Either a comma separated list or array based list of (security) groups. Only instances with the provided + * security groups will be used in the cluster discovery. (NOTE: You could provide either group NAME or group ID.) + */ + Setting> GROUPS_SETTING = + Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(), false, Setting.Scope.CLUSTER); + /** + * discovery.ec2.availability_zones: Either a comma separated list or array based list of availability zones. Only instances within + * the provided availability zones will be used in the cluster discovery. 
+ */ + Setting> AVAILABILITY_ZONES_SETTING = + Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(), s -> s.toString(), false, + Setting.Scope.CLUSTER); + /** + * discovery.ec2.node_cache_time: How long the list of hosts is cached to prevent further requests to the AWS API. Defaults to 10s. + */ + Setting NODE_CACHE_TIME_SETTING = + Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER); + + /** + * discovery.ec2.tag.*: The ec2 discovery can filter machines to include in the cluster based on tags (and not just groups). + * The settings to use include the discovery.ec2.tag. prefix. For example, setting discovery.ec2.tag.stage to dev will only filter + * instances with a tag key set to stage, and a value of dev. Several tags set will require all of those tags to be set for the + * instance to be included. + */ + Setting TAG_SETTING = Setting.groupSetting("discovery.ec2.tag.", false,Setting.Scope.CLUSTER); } AmazonEC2 client(); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java index 3c300e47722..bccead9be00 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java @@ -22,7 +22,6 @@ package org.elasticsearch.cloud.aws; import com.amazonaws.AmazonClientException; import com.amazonaws.AmazonWebServiceRequest; import com.amazonaws.ClientConfiguration; -import com.amazonaws.Protocol; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AWSCredentialsProviderChain; import com.amazonaws.auth.BasicAWSCredentials; @@ -33,18 +32,17 @@ import com.amazonaws.internal.StaticCredentialsProvider; import com.amazonaws.retry.RetryPolicy; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.AmazonEC2Client; - import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cloud.aws.network.Ec2NameResolver; import org.elasticsearch.cloud.aws.node.Ec2CustomNodeAttributes; import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import java.util.Locale; import java.util.Random; /** @@ -74,30 +72,15 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. 
clientConfiguration.setResponseMetadataCacheSize(0); - String protocol = settings.get(CLOUD_EC2.PROTOCOL, settings.get(CLOUD_AWS.PROTOCOL, "https")).toLowerCase(Locale.ROOT); - if ("http".equals(protocol)) { - clientConfiguration.setProtocol(Protocol.HTTP); - } else if ("https".equals(protocol)) { - clientConfiguration.setProtocol(Protocol.HTTPS); - } else { - throw new IllegalArgumentException("No protocol supported [" + protocol + "], can either be [http] or [https]"); - } - String account = settings.get(CLOUD_EC2.KEY, settings.get(CLOUD_AWS.KEY)); - String key = settings.get(CLOUD_EC2.SECRET, settings.get(CLOUD_AWS.SECRET)); + clientConfiguration.setProtocol(CLOUD_EC2.PROTOCOL_SETTING.get(settings)); + String key = CLOUD_EC2.KEY_SETTING.get(settings); + String secret = CLOUD_EC2.SECRET_SETTING.get(settings); - String proxyHost = settings.get(CLOUD_AWS.PROXY_HOST); - proxyHost = settings.get(CLOUD_EC2.PROXY_HOST, proxyHost); + String proxyHost = CLOUD_EC2.PROXY_HOST_SETTING.get(settings); if (proxyHost != null) { - String portString = settings.get(CLOUD_AWS.PROXY_PORT, "80"); - portString = settings.get(CLOUD_EC2.PROXY_PORT, portString); - Integer proxyPort; - try { - proxyPort = Integer.parseInt(portString, 10); - } catch (NumberFormatException ex) { - throw new IllegalArgumentException("The configured proxy port value [" + portString + "] is invalid", ex); - } - String proxyUsername = settings.get(CLOUD_EC2.PROXY_USERNAME, settings.get(CLOUD_AWS.PROXY_USERNAME)); - String proxyPassword = settings.get(CLOUD_EC2.PROXY_PASSWORD, settings.get(CLOUD_AWS.PROXY_PASSWORD)); + Integer proxyPort = CLOUD_EC2.PROXY_PORT_SETTING.get(settings); + String proxyUsername = CLOUD_EC2.PROXY_USERNAME_SETTING.get(settings); + String proxyPassword = CLOUD_EC2.PROXY_PASSWORD_SETTING.get(settings); clientConfiguration .withProxyHost(proxyHost) @@ -107,15 +90,10 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent } // #155: we might have 3rd party users using older EC2 API version - String awsSigner = settings.get(CLOUD_EC2.SIGNER, settings.get(CLOUD_AWS.SIGNER)); - if (awsSigner != null) { + String awsSigner = CLOUD_EC2.SIGNER_SETTING.get(settings); + if (Strings.hasText(awsSigner)) { logger.debug("using AWS API signer [{}]", awsSigner); - try { - AwsSigner.configureSigner(awsSigner, clientConfiguration); - } catch (IllegalArgumentException e) { - logger.warn("wrong signer set for [{}] or [{}]: [{}]", - CLOUD_EC2.SIGNER, CLOUD_AWS.SIGNER, awsSigner); - } + AwsSigner.configureSigner(awsSigner, clientConfiguration); } // Increase the number of retries in case of 5xx API responses @@ -138,7 +116,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent AWSCredentialsProvider credentials; - if (account == null && key == null) { + if (key == null && secret == null) { credentials = new AWSCredentialsProviderChain( new EnvironmentVariableCredentialsProvider(), new SystemPropertiesCredentialsProvider(), @@ -146,19 +124,18 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent ); } else { credentials = new AWSCredentialsProviderChain( - new StaticCredentialsProvider(new BasicAWSCredentials(account, key)) + new StaticCredentialsProvider(new BasicAWSCredentials(key, secret)) ); } this.client = new AmazonEC2Client(credentials, clientConfiguration); - if (settings.get(CLOUD_EC2.ENDPOINT) != null) { - String endpoint = settings.get(CLOUD_EC2.ENDPOINT); + String endpoint = CLOUD_EC2.ENDPOINT_SETTING.get(settings); + if (endpoint != null) { logger.debug("using explicit ec2 endpoint 
[{}]", endpoint); client.setEndpoint(endpoint); - } else if (settings.get(CLOUD_AWS.REGION) != null) { - String region = settings.get(CLOUD_AWS.REGION).toLowerCase(Locale.ROOT); - String endpoint; + } else if (CLOUD_EC2.REGION_SETTING.exists(settings)) { + String region = CLOUD_EC2.REGION_SETTING.get(settings); if (region.equals("us-east-1") || region.equals("us-east")) { endpoint = "ec2.us-east-1.amazonaws.com"; } else if (region.equals("us-west") || region.equals("us-west-1")) { diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/node/Ec2CustomNodeAttributes.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/node/Ec2CustomNodeAttributes.java index fcac113ebcc..9ba1ce650e8 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/node/Ec2CustomNodeAttributes.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/node/Ec2CustomNodeAttributes.java @@ -20,6 +20,7 @@ package org.elasticsearch.cloud.aws.node; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.cloud.aws.AwsEc2Service; import org.elasticsearch.cloud.aws.AwsEc2ServiceImpl; import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.common.component.AbstractComponent; @@ -45,7 +46,7 @@ public class Ec2CustomNodeAttributes extends AbstractComponent implements Discov @Override public Map buildAttributes() { - if (!settings.getAsBoolean("cloud.node.auto_attributes", false)) { + if (AwsEc2Service.AUTO_ATTRIBUTE_SETTING.get(settings) == false) { return null; } Map ec2Attributes = new HashMap<>(); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java index cafbae2671f..a0ba6caf741 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java @@ -31,7 +31,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cloud.aws.AwsEc2Service; import org.elasticsearch.cloud.aws.AwsEc2Service.DISCOVERY_EC2; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -42,11 +41,9 @@ import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; @@ -55,13 +52,6 @@ import java.util.Set; */ public class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { - private static enum HostType { - PRIVATE_IP, - PUBLIC_IP, - PRIVATE_DNS, - PUBLIC_DNS - } - private final TransportService transportService; private final AmazonEC2 client; @@ -76,7 +66,7 @@ public class AwsEc2UnicastHostsProvider extends AbstractComponent implements Uni private final Set availabilityZones; - private final HostType hostType; + private final DISCOVERY_EC2.HostType hostType; private final DiscoNodesCache discoNodes; @@ -87,24 +77,17 @@ public class AwsEc2UnicastHostsProvider extends AbstractComponent implements Uni this.client = awsEc2Service.client(); 
this.version = version; - this.hostType = HostType.valueOf(settings.get(DISCOVERY_EC2.HOST_TYPE, "private_ip") - .toUpperCase(Locale.ROOT)); + this.hostType = DISCOVERY_EC2.HOST_TYPE_SETTING.get(settings); + this.discoNodes = new DiscoNodesCache(DISCOVERY_EC2.NODE_CACHE_TIME_SETTING.get(settings)); - this.discoNodes = new DiscoNodesCache(this.settings.getAsTime(DISCOVERY_EC2.NODE_CACHE_TIME, - TimeValue.timeValueMillis(10_000L))); - - this.bindAnyGroup = settings.getAsBoolean(DISCOVERY_EC2.ANY_GROUP, true); + this.bindAnyGroup = DISCOVERY_EC2.ANY_GROUP_SETTING.get(settings); this.groups = new HashSet<>(); - groups.addAll(Arrays.asList(settings.getAsArray(DISCOVERY_EC2.GROUPS))); + this.groups.addAll(DISCOVERY_EC2.GROUPS_SETTING.get(settings)); - this.tags = settings.getByPrefix(DISCOVERY_EC2.TAG_PREFIX).getAsMap(); + this.tags = DISCOVERY_EC2.TAG_SETTING.get(settings).getAsMap(); - Set availabilityZones = new HashSet<>(); - availabilityZones.addAll(Arrays.asList(settings.getAsArray(DISCOVERY_EC2.AVAILABILITY_ZONES))); - if (settings.get(DISCOVERY_EC2.AVAILABILITY_ZONES) != null) { - availabilityZones.addAll(Strings.commaDelimitedListToSet(settings.get(DISCOVERY_EC2.AVAILABILITY_ZONES))); - } - this.availabilityZones = availabilityZones; + this.availabilityZones = new HashSet<>(); + availabilityZones.addAll(DISCOVERY_EC2.AVAILABILITY_ZONES_SETTING.get(settings)); if (logger.isDebugEnabled()) { logger.debug("using host_type [{}], tags [{}], groups [{}] with any_group [{}], availability_zones [{}]", hostType, tags, groups, bindAnyGroup, availabilityZones); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java index 2e689d9d26c..baad869a0aa 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java @@ -19,11 +19,6 @@ package org.elasticsearch.plugin.discovery.ec2; -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.ArrayList; -import java.util.Collection; - import org.elasticsearch.SpecialPermission; import org.elasticsearch.cloud.aws.AwsEc2Service; import org.elasticsearch.cloud.aws.AwsEc2ServiceImpl; @@ -32,6 +27,7 @@ import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.discovery.DiscoveryModule; @@ -39,6 +35,11 @@ import org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider; import org.elasticsearch.discovery.ec2.Ec2Discovery; import org.elasticsearch.plugins.Plugin; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.ArrayList; +import java.util.Collection; + /** * */ @@ -104,12 +105,51 @@ public class Ec2DiscoveryPlugin extends Plugin { } public void onModule(SettingsModule settingsModule) { + // Register global cloud aws settings: cloud.aws (might have been registered in ec2 plugin) + registerSettingIfMissing(settingsModule, AwsEc2Service.KEY_SETTING); + registerSettingIfMissing(settingsModule, AwsEc2Service.SECRET_SETTING); + 
registerSettingIfMissing(settingsModule, AwsEc2Service.PROTOCOL_SETTING); + registerSettingIfMissing(settingsModule, AwsEc2Service.PROXY_HOST_SETTING); + registerSettingIfMissing(settingsModule, AwsEc2Service.PROXY_PORT_SETTING); + registerSettingIfMissing(settingsModule, AwsEc2Service.PROXY_USERNAME_SETTING); + registerSettingIfMissing(settingsModule, AwsEc2Service.PROXY_PASSWORD_SETTING); + registerSettingIfMissing(settingsModule, AwsEc2Service.SIGNER_SETTING); + registerSettingIfMissing(settingsModule, AwsEc2Service.REGION_SETTING); + + // Register EC2 specific settings: cloud.aws.ec2 + settingsModule.registerSetting(AwsEc2Service.CLOUD_EC2.KEY_SETTING); + settingsModule.registerSetting(AwsEc2Service.CLOUD_EC2.SECRET_SETTING); + settingsModule.registerSetting(AwsEc2Service.CLOUD_EC2.PROTOCOL_SETTING); + settingsModule.registerSetting(AwsEc2Service.CLOUD_EC2.PROXY_HOST_SETTING); + settingsModule.registerSetting(AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING); + settingsModule.registerSetting(AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING); + settingsModule.registerSetting(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING); + settingsModule.registerSetting(AwsEc2Service.CLOUD_EC2.SIGNER_SETTING); + settingsModule.registerSetting(AwsEc2Service.CLOUD_EC2.REGION_SETTING); + settingsModule.registerSetting(AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING); + + // Register EC2 discovery settings: discovery.ec2 + settingsModule.registerSetting(AwsEc2Service.DISCOVERY_EC2.HOST_TYPE_SETTING); + settingsModule.registerSetting(AwsEc2Service.DISCOVERY_EC2.ANY_GROUP_SETTING); + settingsModule.registerSetting(AwsEc2Service.DISCOVERY_EC2.GROUPS_SETTING); + settingsModule.registerSetting(AwsEc2Service.DISCOVERY_EC2.AVAILABILITY_ZONES_SETTING); + settingsModule.registerSetting(AwsEc2Service.DISCOVERY_EC2.NODE_CACHE_TIME_SETTING); + // Filter global settings - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_AWS.KEY); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_AWS.SECRET); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_AWS.PROXY_PASSWORD); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.KEY); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.SECRET); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD); + settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.KEY_SETTING.getKey()); + settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.SECRET_SETTING.getKey()); + settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.PROXY_PASSWORD_SETTING.getKey()); + settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.KEY_SETTING.getKey()); + settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.SECRET_SETTING.getKey()); + settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING.getKey()); + } + + /** + * We manage potential duplicates between s3 and ec2 plugins (cloud.aws.xxx) + */ + private void registerSettingIfMissing(SettingsModule settingsModule, Setting setting) { + if (settingsModule.exists(setting) == false) { + settingsModule.registerSetting(setting); + } } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AWSSignersTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AWSSignersTests.java index baaeb9b1b01..555e9f5c10a 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AWSSignersTests.java +++ 
b/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AWSSignersTests.java @@ -20,11 +20,24 @@ package org.elasticsearch.cloud.aws; import com.amazonaws.ClientConfiguration; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugin.discovery.ec2.Ec2DiscoveryPlugin; import org.elasticsearch.test.ESTestCase; +import org.junit.BeforeClass; import static org.hamcrest.CoreMatchers.is; public class AWSSignersTests extends ESTestCase { + + /** + * Starts Ec2DiscoveryPlugin. It's a workaround when you run test from IntelliJ. Otherwise it generates + * java.security.AccessControlException: access denied ("java.lang.RuntimePermission" "accessDeclaredMembers") + */ + @BeforeClass + public static void instantiatePlugin() { + new Ec2DiscoveryPlugin(Settings.EMPTY); + } + public void testSigners() { assertThat(signerTester(null), is(false)); assertThat(signerTester("QueryStringSignerType"), is(true)); diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java index e5931dc8b8e..cc9b0897600 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java @@ -25,9 +25,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.env.Environment; import org.elasticsearch.plugin.discovery.ec2.Ec2DiscoveryPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ThirdParty; +import java.util.Collection; + /** * Base class for AWS tests that require credentials. *

@@ -42,7 +45,6 @@ public abstract class AbstractAwsTestCase extends ESIntegTestCase { Settings.Builder settings = Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .extendArray("plugin.types", Ec2DiscoveryPlugin.class.getName()) .put("cloud.aws.test.random", randomInt()) .put("cloud.aws.test.write_failures", 0.1) .put("cloud.aws.test.read_failures", 0.1); @@ -52,11 +54,16 @@ public abstract class AbstractAwsTestCase extends ESIntegTestCase { if (Strings.hasText(System.getProperty("tests.config"))) { settings.loadFromPath(PathUtils.get(System.getProperty("tests.config"))); } else { - throw new IllegalStateException("to run integration tests, you need to set -Dtest.thirdparty=true and -Dtests.config=/path/to/elasticsearch.yml"); + throw new IllegalStateException("to run integration tests, you need to set -Dtests.thirdparty=true and -Dtests.config=/path/to/elasticsearch.yml"); } } catch (SettingsException exception) { throw new IllegalStateException("your test configuration file is incorrect: " + System.getProperty("tests.config"), exception); } return settings.build(); } + + @Override + protected Collection> nodePlugins() { + return pluginList(Ec2DiscoveryPlugin.class); + } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoverySettingsTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoverySettingsTests.java index f0dfe960c80..97a33c54a68 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoverySettingsTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoverySettingsTests.java @@ -19,11 +19,14 @@ package org.elasticsearch.discovery.ec2; +import com.amazonaws.Protocol; +import org.elasticsearch.cloud.aws.AwsEc2Service; import org.elasticsearch.cloud.aws.Ec2Module; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isEmptyString; public class Ec2DiscoverySettingsTests extends ESTestCase { @@ -41,4 +44,71 @@ public class Ec2DiscoverySettingsTests extends ESTestCase { assertThat(discoveryReady, is(false)); } + + private static final Settings AWS = Settings.builder() + .put(AwsEc2Service.KEY_SETTING.getKey(), "global-key") + .put(AwsEc2Service.SECRET_SETTING.getKey(), "global-secret") + .put(AwsEc2Service.PROTOCOL_SETTING.getKey(), "https") + .put(AwsEc2Service.PROXY_HOST_SETTING.getKey(), "global-proxy-host") + .put(AwsEc2Service.PROXY_PORT_SETTING.getKey(), 10000) + .put(AwsEc2Service.PROXY_USERNAME_SETTING.getKey(), "global-proxy-username") + .put(AwsEc2Service.PROXY_PASSWORD_SETTING.getKey(), "global-proxy-password") + .put(AwsEc2Service.SIGNER_SETTING.getKey(), "global-signer") + .put(AwsEc2Service.REGION_SETTING.getKey(), "global-region") + .build(); + + private static final Settings EC2 = Settings.builder() + .put(AwsEc2Service.CLOUD_EC2.KEY_SETTING.getKey(), "ec2-key") + .put(AwsEc2Service.CLOUD_EC2.SECRET_SETTING.getKey(), "ec2-secret") + .put(AwsEc2Service.CLOUD_EC2.PROTOCOL_SETTING.getKey(), "http") + .put(AwsEc2Service.CLOUD_EC2.PROXY_HOST_SETTING.getKey(), "ec2-proxy-host") + .put(AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING.getKey(), 20000) + .put(AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING.getKey(), "ec2-proxy-username") + .put(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING.getKey(), "ec2-proxy-password") + 
.put(AwsEc2Service.CLOUD_EC2.SIGNER_SETTING.getKey(), "ec2-signer") + .put(AwsEc2Service.CLOUD_EC2.REGION_SETTING.getKey(), "ec2-region") + .put(AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING.getKey(), "ec2-endpoint") + .build(); + + /** + * We test when only cloud.aws settings are set + */ + public void testRepositorySettingsGlobalOnly() { + Settings nodeSettings = buildSettings(AWS); + assertThat(AwsEc2Service.CLOUD_EC2.KEY_SETTING.get(nodeSettings), is("global-key")); + assertThat(AwsEc2Service.CLOUD_EC2.SECRET_SETTING.get(nodeSettings), is("global-secret")); + assertThat(AwsEc2Service.CLOUD_EC2.PROTOCOL_SETTING.get(nodeSettings), is(Protocol.HTTPS)); + assertThat(AwsEc2Service.CLOUD_EC2.PROXY_HOST_SETTING.get(nodeSettings), is("global-proxy-host")); + assertThat(AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING.get(nodeSettings), is(10000)); + assertThat(AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING.get(nodeSettings), is("global-proxy-username")); + assertThat(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING.get(nodeSettings), is("global-proxy-password")); + assertThat(AwsEc2Service.CLOUD_EC2.SIGNER_SETTING.get(nodeSettings), is("global-signer")); + assertThat(AwsEc2Service.CLOUD_EC2.REGION_SETTING.get(nodeSettings), is("global-region")); + assertThat(AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING.get(nodeSettings), isEmptyString()); + } + + /** + * We test when cloud.aws settings are overloaded by cloud.aws.ec2 settings + */ + public void testRepositorySettingsGlobalOverloadedByEC2() { + Settings nodeSettings = buildSettings(AWS, EC2); + assertThat(AwsEc2Service.CLOUD_EC2.KEY_SETTING.get(nodeSettings), is("ec2-key")); + assertThat(AwsEc2Service.CLOUD_EC2.SECRET_SETTING.get(nodeSettings), is("ec2-secret")); + assertThat(AwsEc2Service.CLOUD_EC2.PROTOCOL_SETTING.get(nodeSettings), is(Protocol.HTTP)); + assertThat(AwsEc2Service.CLOUD_EC2.PROXY_HOST_SETTING.get(nodeSettings), is("ec2-proxy-host")); + assertThat(AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING.get(nodeSettings), is(20000)); + assertThat(AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING.get(nodeSettings), is("ec2-proxy-username")); + assertThat(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING.get(nodeSettings), is("ec2-proxy-password")); + assertThat(AwsEc2Service.CLOUD_EC2.SIGNER_SETTING.get(nodeSettings), is("ec2-signer")); + assertThat(AwsEc2Service.CLOUD_EC2.REGION_SETTING.get(nodeSettings), is("ec2-region")); + assertThat(AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING.get(nodeSettings), is("ec2-endpoint")); + } + + private Settings buildSettings(Settings... 
global) { + Settings.Builder builder = Settings.builder(); + for (Settings settings : global) { + builder.put(settings); + } + return builder.build(); + } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index 36de36d0164..5063d59b40e 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.ec2; import com.amazonaws.services.ec2.model.Tag; - import org.elasticsearch.Version; import org.elasticsearch.cloud.aws.AwsEc2Service; import org.elasticsearch.cloud.aws.AwsEc2Service.DISCOVERY_EC2; @@ -95,7 +94,7 @@ public class Ec2DiscoveryTests extends ESTestCase { public void testPrivateIp() throws InterruptedException { int nodes = randomInt(10); Settings nodeSettings = Settings.builder() - .put(DISCOVERY_EC2.HOST_TYPE, "private_ip") + .put(DISCOVERY_EC2.HOST_TYPE_SETTING.getKey(), "private_ip") .build(); List discoveryNodes = buildDynamicNodes(nodeSettings, nodes); assertThat(discoveryNodes, hasSize(nodes)); @@ -111,7 +110,7 @@ public class Ec2DiscoveryTests extends ESTestCase { public void testPublicIp() throws InterruptedException { int nodes = randomInt(10); Settings nodeSettings = Settings.builder() - .put(DISCOVERY_EC2.HOST_TYPE, "public_ip") + .put(DISCOVERY_EC2.HOST_TYPE_SETTING.getKey(), "public_ip") .build(); List discoveryNodes = buildDynamicNodes(nodeSettings, nodes); assertThat(discoveryNodes, hasSize(nodes)); @@ -127,7 +126,7 @@ public class Ec2DiscoveryTests extends ESTestCase { public void testPrivateDns() throws InterruptedException { int nodes = randomInt(10); Settings nodeSettings = Settings.builder() - .put(DISCOVERY_EC2.HOST_TYPE, "private_dns") + .put(DISCOVERY_EC2.HOST_TYPE_SETTING.getKey(), "private_dns") .build(); List discoveryNodes = buildDynamicNodes(nodeSettings, nodes); assertThat(discoveryNodes, hasSize(nodes)); @@ -145,7 +144,7 @@ public class Ec2DiscoveryTests extends ESTestCase { public void testPublicDns() throws InterruptedException { int nodes = randomInt(10); Settings nodeSettings = Settings.builder() - .put(DISCOVERY_EC2.HOST_TYPE, "public_dns") + .put(DISCOVERY_EC2.HOST_TYPE_SETTING.getKey(), "public_dns") .build(); List discoveryNodes = buildDynamicNodes(nodeSettings, nodes); assertThat(discoveryNodes, hasSize(nodes)); @@ -162,7 +161,7 @@ public class Ec2DiscoveryTests extends ESTestCase { public void testInvalidHostType() throws InterruptedException { Settings nodeSettings = Settings.builder() - .put(DISCOVERY_EC2.HOST_TYPE, "does_not_exist") + .put(DISCOVERY_EC2.HOST_TYPE_SETTING.getKey(), "does_not_exist") .build(); try { buildDynamicNodes(nodeSettings, 1); @@ -175,7 +174,7 @@ public class Ec2DiscoveryTests extends ESTestCase { public void testFilterByTags() throws InterruptedException { int nodes = randomIntBetween(5, 10); Settings nodeSettings = Settings.builder() - .put(DISCOVERY_EC2.TAG_PREFIX + "stage", "prod") + .put(DISCOVERY_EC2.TAG_SETTING.getKey() + "stage", "prod") .build(); int prodInstances = 0; @@ -200,7 +199,7 @@ public class Ec2DiscoveryTests extends ESTestCase { public void testFilterByMultipleTags() throws InterruptedException { int nodes = randomIntBetween(5, 10); Settings nodeSettings = Settings.builder() - .putArray(DISCOVERY_EC2.TAG_PREFIX + "stage", "prod", "preprod") + 
.putArray(DISCOVERY_EC2.TAG_SETTING.getKey() + "stage", "prod", "preprod") .build(); int prodInstances = 0; @@ -252,7 +251,7 @@ public class Ec2DiscoveryTests extends ESTestCase { public void testGetNodeListCached() throws Exception { Settings.Builder builder = Settings.settingsBuilder() - .put(DISCOVERY_EC2.NODE_CACHE_TIME, "500ms"); + .put(DISCOVERY_EC2.NODE_CACHE_TIME_SETTING.getKey(), "500ms"); AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(Settings.EMPTY, 1, null); DummyEc2HostProvider provider = new DummyEc2HostProvider(builder.build(), transportService, awsEc2Service, Version.CURRENT) { @Override diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java index 68596ce2ace..2af1b47f622 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java @@ -23,7 +23,6 @@ package org.elasticsearch.discovery.ec2; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.cloud.aws.AbstractAwsTestCase; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugin.discovery.ec2.Ec2DiscoveryPlugin; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -39,8 +38,6 @@ import static org.hamcrest.CoreMatchers.is; public class Ec2DiscoveryUpdateSettingsTests extends AbstractAwsTestCase { public void testMinimumMasterNodesStart() { Settings nodeSettings = settingsBuilder() - .put("plugin.types", Ec2DiscoveryPlugin.class.getName()) - .put("cloud.enabled", true) .put("discovery.type", "ec2") .build(); internalCluster().startNode(nodeSettings); diff --git a/plugins/repository-s3/generated-resources/plugin-descriptor.properties b/plugins/repository-s3/generated-resources/plugin-descriptor.properties new file mode 100644 index 00000000000..ff42cdf7543 --- /dev/null +++ b/plugins/repository-s3/generated-resources/plugin-descriptor.properties @@ -0,0 +1,48 @@ +# Elasticsearch plugin descriptor file +# This file must exist as 'plugin-descriptor.properties' at +# the root directory of all plugins. +# +### example plugin for "foo" +# +# foo.zip <-- zip file for the plugin, with this structure: +# .jar <-- classes, resources, dependencies +# .jar <-- any number of jars +# plugin-descriptor.properties <-- example contents below: +# +# classname=foo.bar.BazPlugin +# description=My cool plugin +# version=2.0 +# elasticsearch.version=2.0 +# java.version=1.7 +# +### mandatory elements for all plugins: +# +# 'description': simple summary of the plugin +description=The S3 repository plugin adds S3 repositories. +# +# 'version': plugin's version +version=3.0.0-SNAPSHOT +# +# 'name': the plugin name +name=repository-s3 +# +# 'classname': the name of the class to load, fully-qualified. 
+classname=org.elasticsearch.plugin.repository.s3.S3RepositoryPlugin +# +# 'java.version' version of java the code is built against +# use the system property java.specification.version +# version string must be a sequence of nonnegative decimal integers +# separated by "."'s and may have leading zeros +java.version=1.8 +# +# 'elasticsearch.version' version of elasticsearch compiled against +elasticsearch.version=3.0.0-SNAPSHOT +# +### deprecated elements for jvm plugins : +# +# 'isolated': true if the plugin should have its own classloader. +# passing false is deprecated, and only intended to support plugins +# that have hard dependencies against each other. If this is +# not specified, then the plugin is isolated by default. +isolated=true +# \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java index 55c4b58e6dd..3ccd6d7987f 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java @@ -19,59 +19,132 @@ package org.elasticsearch.cloud.aws; +import com.amazonaws.Protocol; import com.amazonaws.services.s3.AmazonS3; - import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.common.settings.Setting; + +import java.util.Locale; +import java.util.function.Function; /** * */ public interface AwsS3Service extends LifecycleComponent { - final class CLOUD_AWS { - public static final String KEY = "cloud.aws.access_key"; - public static final String SECRET = "cloud.aws.secret_key"; - public static final String PROTOCOL = "cloud.aws.protocol"; - public static final String PROXY_HOST = "cloud.aws.proxy.host"; - public static final String PROXY_PORT = "cloud.aws.proxy.port"; - public static final String PROXY_USERNAME = "cloud.aws.proxy.username"; - public static final String PROXY_PASSWORD = "cloud.aws.proxy.password"; - public static final String SIGNER = "cloud.aws.signer"; - public static final String REGION = "cloud.aws.region"; + // Global AWS settings (shared between discovery-ec2 and repository-s3) + // Each setting starting with `cloud.aws` also exists in discovery-ec2 project. Don't forget to update + // the code there if you change anything here. + /** + * cloud.aws.access_key: AWS Access key. Shared with discovery-ec2 plugin + */ + Setting KEY_SETTING = Setting.simpleString("cloud.aws.access_key", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.secret_key: AWS Secret key. Shared with discovery-ec2 plugin + */ + Setting SECRET_SETTING = Setting.simpleString("cloud.aws.secret_key", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.protocol: Protocol for AWS API: http or https. Defaults to https. Shared with discovery-ec2 plugin + */ + Setting PROTOCOL_SETTING = new Setting<>("cloud.aws.protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), + false, Setting.Scope.CLUSTER); + /** + * cloud.aws.proxy.host: In case of proxy, define its hostname/IP. Shared with discovery-ec2 plugin + */ + Setting PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.proxy.port: In case of proxy, define its port. Defaults to 80. 
Shared with discovery-ec2 plugin + */ + Setting PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, false, Setting.Scope.CLUSTER); + /** + * cloud.aws.proxy.username: In case of proxy with auth, define the username. Shared with discovery-ec2 plugin + */ + Setting PROXY_USERNAME_SETTING = Setting.simpleString("cloud.aws.proxy.username", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.proxy.password: In case of proxy with auth, define the password. Shared with discovery-ec2 plugin + */ + Setting PROXY_PASSWORD_SETTING = Setting.simpleString("cloud.aws.proxy.password", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.signer: If you are using an old AWS API version, you can define a Signer. Shared with discovery-ec2 plugin + */ + Setting SIGNER_SETTING = Setting.simpleString("cloud.aws.signer", false, Setting.Scope.CLUSTER); + /** + * cloud.aws.region: Region. Shared with discovery-ec2 plugin + */ + Setting REGION_SETTING = new Setting<>("cloud.aws.region", "", s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + + /** + * Defines specific s3 settings starting with cloud.aws.s3. + */ + interface CLOUD_S3 { + /** + * cloud.aws.s3.access_key: AWS Access key specific for S3 API calls. Defaults to cloud.aws.access_key. + * @see AwsS3Service#KEY_SETTING + */ + Setting KEY_SETTING = + new Setting<>("cloud.aws.s3.access_key", AwsS3Service.KEY_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + /** + * cloud.aws.s3.secret_key: AWS Secret key specific for S3 API calls. Defaults to cloud.aws.secret_key. + * @see AwsS3Service#SECRET_SETTING + */ + Setting SECRET_SETTING = + new Setting<>("cloud.aws.s3.secret_key", AwsS3Service.SECRET_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + /** + * cloud.aws.s3.protocol: Protocol for AWS API specific for S3 API calls: http or https. Defaults to cloud.aws.protocol. + * @see AwsS3Service#PROTOCOL_SETTING + */ + Setting PROTOCOL_SETTING = + new Setting<>("cloud.aws.s3.protocol", AwsS3Service.PROTOCOL_SETTING, s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, + Setting.Scope.CLUSTER); + /** + * cloud.aws.s3.proxy.host: In case of proxy, define its hostname/IP specific for S3 API calls. Defaults to cloud.aws.proxy.host. + * @see AwsS3Service#PROXY_HOST_SETTING + */ + Setting PROXY_HOST_SETTING = + new Setting<>("cloud.aws.s3.proxy.host", AwsS3Service.PROXY_HOST_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + /** + * cloud.aws.s3.proxy.port: In case of proxy, define its port specific for S3 API calls. Defaults to cloud.aws.proxy.port. + * @see AwsS3Service#PROXY_PORT_SETTING + */ + Setting PROXY_PORT_SETTING = + new Setting<>("cloud.aws.s3.proxy.port", AwsS3Service.PROXY_PORT_SETTING, + s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.s3.proxy.port"), false, Setting.Scope.CLUSTER); + /** + * cloud.aws.s3.proxy.username: In case of proxy with auth, define the username specific for S3 API calls. + * Defaults to cloud.aws.proxy.username. + * @see AwsS3Service#PROXY_USERNAME_SETTING + */ + Setting PROXY_USERNAME_SETTING = + new Setting<>("cloud.aws.s3.proxy.username", AwsS3Service.PROXY_USERNAME_SETTING, Function.identity(), false, + Setting.Scope.CLUSTER); + /** + * cloud.aws.s3.proxy.password: In case of proxy with auth, define the password specific for S3 API calls. + * Defaults to cloud.aws.proxy.password. 
+ * @see AwsS3Service#PROXY_PASSWORD_SETTING + */ + Setting PROXY_PASSWORD_SETTING = + new Setting<>("cloud.aws.s3.proxy.password", AwsS3Service.PROXY_PASSWORD_SETTING, Function.identity(), false, + Setting.Scope.CLUSTER); + /** + * cloud.aws.s3.signer: If you are using an old AWS API version, you can define a Signer. Specific for S3 API calls. + * Defaults to cloud.aws.signer. + * @see AwsS3Service#SIGNER_SETTING + */ + Setting SIGNER_SETTING = + new Setting<>("cloud.aws.s3.signer", AwsS3Service.SIGNER_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + /** + * cloud.aws.s3.region: Region specific for S3 API calls. Defaults to cloud.aws.region. + * @see AwsS3Service#REGION_SETTING + */ + Setting REGION_SETTING = + new Setting<>("cloud.aws.s3.region", AwsS3Service.REGION_SETTING, s -> s.toLowerCase(Locale.ROOT), false, + Setting.Scope.CLUSTER); + /** + * cloud.aws.s3.endpoint: Endpoint. If not set, endpoint will be guessed based on region setting. + */ + Setting ENDPOINT_SETTING = + Setting.simpleString("cloud.aws.s3.endpoint", false, Setting.Scope.CLUSTER); } - final class CLOUD_S3 { - public static final String KEY = "cloud.aws.s3.access_key"; - public static final String SECRET = "cloud.aws.s3.secret_key"; - public static final String PROTOCOL = "cloud.aws.s3.protocol"; - public static final String PROXY_HOST = "cloud.aws.s3.proxy.host"; - public static final String PROXY_PORT = "cloud.aws.s3.proxy.port"; - public static final String PROXY_USERNAME = "cloud.aws.s3.proxy.username"; - public static final String PROXY_PASSWORD = "cloud.aws.s3.proxy.password"; - public static final String SIGNER = "cloud.aws.s3.signer"; - public static final String ENDPOINT = "cloud.aws.s3.endpoint"; - } - - final class REPOSITORY_S3 { - public static final String BUCKET = "repositories.s3.bucket"; - public static final String ENDPOINT = "repositories.s3.endpoint"; - public static final String PROTOCOL = "repositories.s3.protocol"; - public static final String REGION = "repositories.s3.region"; - public static final String SERVER_SIDE_ENCRYPTION = "repositories.s3.server_side_encryption"; - public static final String BUFFER_SIZE = "repositories.s3.buffer_size"; - public static final String MAX_RETRIES = "repositories.s3.max_retries"; - public static final String CHUNK_SIZE = "repositories.s3.chunk_size"; - public static final String COMPRESS = "repositories.s3.compress"; - public static final String STORAGE_CLASS = "repositories.s3.storage_class"; - public static final String CANNED_ACL = "repositories.s3.canned_acl"; - public static final String BASE_PATH = "repositories.s3.base_path"; - } - - - - AmazonS3 client(); - - AmazonS3 client(String endpoint, String protocol, String region, String account, String key); - - AmazonS3 client(String endpoint, String protocol, String region, String account, String key, Integer maxRetries); + AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java index 5da3b33585c..81b6463a746 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java @@ -31,16 +31,14 @@ import com.amazonaws.http.IdleConnectionReaper; import com.amazonaws.internal.StaticCredentialsProvider; import 
com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3Client; - import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; import java.util.HashMap; -import java.util.Locale; import java.util.Map; /** @@ -51,7 +49,7 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent, AmazonS3Client> clients = new HashMap, AmazonS3Client>(); + private Map, AmazonS3Client> clients = new HashMap<>(); @Inject public InternalAwsS3Service(Settings settings) { @@ -59,36 +57,23 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent clientDescriptor = new Tuple(endpoint, account); + private synchronized AmazonS3 getClient(String endpoint, Protocol protocol, String account, String key, Integer maxRetries) { + Tuple clientDescriptor = new Tuple<>(endpoint, account); AmazonS3Client client = clients.get(clientDescriptor); if (client != null) { return client; @@ -98,32 +83,13 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent setting) { + if (settingsModule.exists(setting) == false) { + settingsModule.registerSetting(setting); + } } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 612f8a9eea7..3edead0765e 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -19,14 +19,15 @@ package org.elasticsearch.repositories.s3; +import com.amazonaws.Protocol; import org.elasticsearch.cloud.aws.AwsS3Service; -import org.elasticsearch.cloud.aws.AwsS3Service.CLOUD_AWS; -import org.elasticsearch.cloud.aws.AwsS3Service.REPOSITORY_S3; +import org.elasticsearch.cloud.aws.AwsS3Service.CLOUD_S3; import org.elasticsearch.cloud.aws.blobstore.S3BlobStore; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.snapshots.IndexShardRepository; @@ -37,6 +38,7 @@ import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import java.io.IOException; import java.util.Locale; +import java.util.function.Function; /** * Shared file system implementation of the BlobStoreRepository @@ -55,6 +57,157 @@ public class S3Repository extends BlobStoreRepository { public final static String TYPE = "s3"; + /** + * Global S3 repositories settings. Starting with: repositories.s3 + */ + public interface Repositories { + /** + * repositories.s3.access_key: AWS Access key specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.access_key. + * @see CLOUD_S3#KEY_SETTING + */ + Setting KEY_SETTING = new Setting<>("repositories.s3.access_key", CLOUD_S3.KEY_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + /** + * repositories.s3.secret_key: AWS Secret key specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.secret_key. 
+ * @see CLOUD_S3#SECRET_SETTING + */ + Setting SECRET_SETTING = new Setting<>("repositories.s3.secret_key", CLOUD_S3.SECRET_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + /** + * repositories.s3.region: Region specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.region. + * @see CLOUD_S3#REGION_SETTING + */ + Setting REGION_SETTING = new Setting<>("repositories.s3.region", CLOUD_S3.REGION_SETTING, s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + /** + * repositories.s3.endpoint: Endpoint specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.endpoint. + * @see CLOUD_S3#ENDPOINT_SETTING + */ + Setting ENDPOINT_SETTING = new Setting<>("repositories.s3.endpoint", CLOUD_S3.ENDPOINT_SETTING, s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + /** + * repositories.s3.protocol: Protocol specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.protocol. + * @see CLOUD_S3#PROTOCOL_SETTING + */ + Setting PROTOCOL_SETTING = new Setting<>("repositories.s3.protocol", CLOUD_S3.PROTOCOL_SETTING, s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, Setting.Scope.CLUSTER); + /** + * repositories.s3.bucket: The name of the bucket to be used for snapshots. + */ + Setting BUCKET_SETTING = Setting.simpleString("repositories.s3.bucket", false, Setting.Scope.CLUSTER); + /** + * repositories.s3.server_side_encryption: When set to true files are encrypted on server side using AES256 algorithm. + * Defaults to false. + */ + Setting SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("repositories.s3.server_side_encryption", false, false, Setting.Scope.CLUSTER); + /** + * repositories.s3.buffer_size: Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold, + * the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and + * to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevents the + * use of the Multipart API and may result in upload errors. Defaults to 5mb. + */ + Setting BUFFER_SIZE_SETTING = Setting.byteSizeSetting("repositories.s3.buffer_size", S3BlobStore.MIN_BUFFER_SIZE, false, Setting.Scope.CLUSTER); + /** + * repositories.s3.max_retries: Number of retries in case of S3 errors. Defaults to 3. + */ + Setting MAX_RETRIES_SETTING = Setting.intSetting("repositories.s3.max_retries", 3, false, Setting.Scope.CLUSTER); + /** + * repositories.s3.chunk_size: Big files can be broken down into chunks during snapshotting if needed. Defaults to 100m. + */ + Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("repositories.s3.chunk_size", new ByteSizeValue(100, ByteSizeUnit.MB), false, Setting.Scope.CLUSTER); + /** + * repositories.s3.compress: When set to true metadata files are stored in compressed format. This setting doesn’t affect index + * files that are already compressed by default. Defaults to false. + */ + Setting COMPRESS_SETTING = Setting.boolSetting("repositories.s3.compress", false, false, Setting.Scope.CLUSTER); + /** + * repositories.s3.storage_class: Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy, + * standard_ia. Defaults to standard. 
+ */ + Setting STORAGE_CLASS_SETTING = Setting.simpleString("repositories.s3.storage_class", false, Setting.Scope.CLUSTER); + /** + * repositories.s3.canned_acl: The S3 repository supports all S3 canned ACLs : private, public-read, public-read-write, + * authenticated-read, log-delivery-write, bucket-owner-read, bucket-owner-full-control. Defaults to private. + */ + Setting CANNED_ACL_SETTING = Setting.simpleString("repositories.s3.canned_acl", false, Setting.Scope.CLUSTER); + /** + * repositories.s3.base_path: Specifies the path within bucket to repository data. Defaults to root directory. + */ + Setting BASE_PATH_SETTING = Setting.simpleString("repositories.s3.base_path", false, Setting.Scope.CLUSTER); + } + + /** + * Per S3 repository specific settings. Same settings as Repositories settings but without the repositories.s3 prefix. + * If undefined, they use the repositories.s3.xxx equivalent setting. + */ + public interface Repository { + /** + * access_key + * @see Repositories#KEY_SETTING + */ + Setting KEY_SETTING = Setting.simpleString("access_key", false, Setting.Scope.CLUSTER); + /** + * secret_key + * @see Repositories#SECRET_SETTING + */ + Setting SECRET_SETTING = Setting.simpleString("secret_key", false, Setting.Scope.CLUSTER); + /** + * bucket + * @see Repositories#BUCKET_SETTING + */ + Setting BUCKET_SETTING = Setting.simpleString("bucket", false, Setting.Scope.CLUSTER); + /** + * endpoint + * @see Repositories#ENDPOINT_SETTING + */ + Setting ENDPOINT_SETTING = Setting.simpleString("endpoint", false, Setting.Scope.CLUSTER); + /** + * protocol + * @see Repositories#PROTOCOL_SETTING + */ + Setting PROTOCOL_SETTING = new Setting<>("protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, Setting.Scope.CLUSTER); + /** + * region + * @see Repositories#REGION_SETTING + */ + Setting REGION_SETTING = new Setting<>("region", "", s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + /** + * server_side_encryption + * @see Repositories#SERVER_SIDE_ENCRYPTION_SETTING + */ + Setting SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false, false, Setting.Scope.CLUSTER); + /** + * buffer_size + * @see Repositories#BUFFER_SIZE_SETTING + */ + Setting BUFFER_SIZE_SETTING = Setting.byteSizeSetting("buffer_size", S3BlobStore.MIN_BUFFER_SIZE, false, Setting.Scope.CLUSTER); + /** + * max_retries + * @see Repositories#MAX_RETRIES_SETTING + */ + Setting MAX_RETRIES_SETTING = Setting.intSetting("max_retries", 3, false, Setting.Scope.CLUSTER); + /** + * chunk_size + * @see Repositories#CHUNK_SIZE_SETTING + */ + Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", "-1", false, Setting.Scope.CLUSTER); + /** + * compress + * @see Repositories#COMPRESS_SETTING + */ + Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, false, Setting.Scope.CLUSTER); + /** + * storage_class + * @see Repositories#STORAGE_CLASS_SETTING + */ + Setting STORAGE_CLASS_SETTING = Setting.simpleString("storage_class", false, Setting.Scope.CLUSTER); + /** + * canned_acl + * @see Repositories#CANNED_ACL_SETTING + */ + Setting CANNED_ACL_SETTING = Setting.simpleString("canned_acl", false, Setting.Scope.CLUSTER); + /** + * base_path + * @see Repositories#BASE_PATH_SETTING + */ + Setting BASE_PATH_SETTING = Setting.simpleString("base_path", false, Setting.Scope.CLUSTER); + } + private final S3BlobStore blobStore; private final BlobPath basePath; @@ -75,62 +228,40 @@ public class S3Repository extends BlobStoreRepository { public 
S3Repository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, AwsS3Service s3Service) throws IOException { super(name.getName(), repositorySettings, indexShardRepository); - String bucket = repositorySettings.settings().get("bucket", settings.get(REPOSITORY_S3.BUCKET)); + String bucket = getValue(repositorySettings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING); if (bucket == null) { throw new RepositoryException(name.name(), "No bucket defined for s3 gateway"); } - String endpoint = repositorySettings.settings().get("endpoint", settings.get(REPOSITORY_S3.ENDPOINT)); - String protocol = repositorySettings.settings().get("protocol", settings.get(REPOSITORY_S3.PROTOCOL)); - - String region = repositorySettings.settings().get("region", settings.get(REPOSITORY_S3.REGION)); - if (region == null) { - // InternalBucket setting is not set - use global region setting - String regionSetting = settings.get(CLOUD_AWS.REGION); - if (regionSetting != null) { - regionSetting = regionSetting.toLowerCase(Locale.ENGLISH); - if ("us-east".equals(regionSetting) || "us-east-1".equals(regionSetting)) { - // Default bucket - setting region to null - region = null; - } else if ("us-west".equals(regionSetting) || "us-west-1".equals(regionSetting)) { - region = "us-west-1"; - } else if ("us-west-2".equals(regionSetting)) { - region = "us-west-2"; - } else if ("ap-southeast".equals(regionSetting) || "ap-southeast-1".equals(regionSetting)) { - region = "ap-southeast-1"; - } else if ("ap-southeast-2".equals(regionSetting)) { - region = "ap-southeast-2"; - } else if ("ap-northeast".equals(regionSetting) || "ap-northeast-1".equals(regionSetting)) { - region = "ap-northeast-1"; - } else if ("eu-west".equals(regionSetting) || "eu-west-1".equals(regionSetting)) { - region = "eu-west-1"; - } else if ("eu-central".equals(regionSetting) || "eu-central-1".equals(regionSetting)) { - region = "eu-central-1"; - } else if ("sa-east".equals(regionSetting) || "sa-east-1".equals(regionSetting)) { - region = "sa-east-1"; - } else if ("cn-north".equals(regionSetting) || "cn-north-1".equals(regionSetting)) { - region = "cn-north-1"; - } - } + String endpoint = getValue(repositorySettings, Repository.ENDPOINT_SETTING, Repositories.ENDPOINT_SETTING); + Protocol protocol = getValue(repositorySettings, Repository.PROTOCOL_SETTING, Repositories.PROTOCOL_SETTING); + String region = getValue(repositorySettings, Repository.REGION_SETTING, Repositories.REGION_SETTING); + // If no region is defined either in region, repositories.s3.region, cloud.aws.s3.region or cloud.aws.region + // we fallback to Default bucket - null + if (Strings.isEmpty(region)) { + region = null; } - boolean serverSideEncryption = repositorySettings.settings().getAsBoolean("server_side_encryption", settings.getAsBoolean(REPOSITORY_S3.SERVER_SIDE_ENCRYPTION, false)); - ByteSizeValue bufferSize = repositorySettings.settings().getAsBytesSize("buffer_size", settings.getAsBytesSize(REPOSITORY_S3.BUFFER_SIZE, null)); - Integer maxRetries = repositorySettings.settings().getAsInt("max_retries", settings.getAsInt(REPOSITORY_S3.MAX_RETRIES, 3)); - this.chunkSize = repositorySettings.settings().getAsBytesSize("chunk_size", settings.getAsBytesSize(REPOSITORY_S3.CHUNK_SIZE, new ByteSizeValue(100, ByteSizeUnit.MB))); - this.compress = repositorySettings.settings().getAsBoolean("compress", settings.getAsBoolean(REPOSITORY_S3.COMPRESS, false)); + boolean serverSideEncryption = getValue(repositorySettings, 
Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING); + ByteSizeValue bufferSize = getValue(repositorySettings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING); + Integer maxRetries = getValue(repositorySettings, Repository.MAX_RETRIES_SETTING, Repositories.MAX_RETRIES_SETTING); + this.chunkSize = getValue(repositorySettings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING); + this.compress = getValue(repositorySettings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING); // Parse and validate the user's S3 Storage Class setting - String storageClass = repositorySettings.settings().get("storage_class", settings.get(REPOSITORY_S3.STORAGE_CLASS, null)); - String cannedACL = repositorySettings.settings().get("canned_acl", settings.get(REPOSITORY_S3.CANNED_ACL, null)); + String storageClass = getValue(repositorySettings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING); + String cannedACL = getValue(repositorySettings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING); logger.debug("using bucket [{}], region [{}], endpoint [{}], protocol [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], max_retries [{}], cannedACL [{}], storageClass [{}]", bucket, region, endpoint, protocol, chunkSize, serverSideEncryption, bufferSize, maxRetries, cannedACL, storageClass); - blobStore = new S3BlobStore(settings, s3Service.client(endpoint, protocol, region, repositorySettings.settings().get("access_key"), repositorySettings.settings().get("secret_key"), maxRetries), + String key = getValue(repositorySettings, Repository.KEY_SETTING, Repositories.KEY_SETTING); + String secret = getValue(repositorySettings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING); + + blobStore = new S3BlobStore(settings, s3Service.client(endpoint, protocol, region, key, secret, maxRetries), bucket, region, serverSideEncryption, bufferSize, maxRetries, cannedACL, storageClass); - String basePath = repositorySettings.settings().get("base_path", settings.get(REPOSITORY_S3.BASE_PATH)); + String basePath = getValue(repositorySettings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING); if (Strings.hasLength(basePath)) { BlobPath path = new BlobPath(); for(String elem : Strings.splitStringToArray(basePath, '/')) { @@ -171,4 +302,13 @@ public class S3Repository extends BlobStoreRepository { return chunkSize; } + public static T getValue(RepositorySettings repositorySettings, + Setting repositorySetting, + Setting repositoriesSetting) { + if (repositorySetting.exists(repositorySettings.settings())) { + return repositorySetting.get(repositorySettings.settings()); + } else { + return repositoriesSetting.get(repositorySettings.globalSettings()); + } + } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AWSSignersTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AWSSignersTests.java index 6346ffe57d4..2e13e04f3c7 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AWSSignersTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AWSSignersTests.java @@ -20,11 +20,24 @@ package org.elasticsearch.cloud.aws; import com.amazonaws.ClientConfiguration; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugin.repository.s3.S3RepositoryPlugin; import org.elasticsearch.test.ESTestCase; +import org.junit.BeforeClass; import static org.hamcrest.CoreMatchers.is; public class 
AWSSignersTests extends ESTestCase { + + /** + * Starts S3RepositoryPlugin. It's a workaround when you run tests from IntelliJ. Otherwise it generates + * java.security.AccessControlException: access denied ("java.lang.RuntimePermission" "accessDeclaredMembers") + */ + @BeforeClass + public static void instantiatePlugin() { + new S3RepositoryPlugin(); + } + public void testSigners() { assertThat(signerTester(null), is(false)); assertThat(signerTester("QueryStringSignerType"), is(true)); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java index bc3706263f7..ec8fb902d66 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java @@ -25,9 +25,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.env.Environment; import org.elasticsearch.plugin.repository.s3.S3RepositoryPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ThirdParty; +import java.util.Collection; + /** * Base class for AWS tests that require credentials. *
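
A note on how the new S3 repository settings introduced above are resolved: a per-repository value (Repository.*) always wins over the node-level repositories.s3.* value (Repositories.*), and several of them (access_key, secret_key, region, endpoint, protocol) in turn default to the corresponding cloud.aws.s3.* setting and, where such a setting exists, to the cloud.aws.* value. S3Repository.getValue() implements the first step and the Setting fallback chains cover the rest; the new RepositoryS3SettingsTests added further below exercises these combinations. The following stand-alone sketch of that lookup order is illustrative only, using plain maps instead of the Elasticsearch Settings classes and a hypothetical class name:

import java.util.HashMap;
import java.util.Map;

public class S3SettingResolutionSketch {

    // Returns the most specific value that is defined, mirroring the precedence
    // repository > repositories.s3.* > cloud.aws.s3.* > cloud.aws.*.
    static String resolve(Map<String, String> repositorySettings, Map<String, String> nodeSettings,
                          String repositoryKey, String suffix, String defaultValue) {
        if (repositorySettings.containsKey(repositoryKey)) {
            return repositorySettings.get(repositoryKey);
        }
        for (String prefix : new String[] {"repositories.s3.", "cloud.aws.s3.", "cloud.aws."}) {
            String value = nodeSettings.get(prefix + suffix);
            if (value != null) {
                return value;
            }
        }
        return defaultValue;
    }

    public static void main(String[] args) {
        Map<String, String> nodeSettings = new HashMap<>();
        nodeSettings.put("cloud.aws.access_key", "global-key");
        nodeSettings.put("cloud.aws.s3.access_key", "s3-key");
        Map<String, String> repositorySettings = new HashMap<>();
        // Prints "s3-key": no repository or repositories.s3 value is set, so the
        // cloud.aws.s3 value shadows the cloud.aws one.
        System.out.println(resolve(repositorySettings, nodeSettings, "access_key", "access_key", null));
    }
}
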

@@ -39,10 +42,9 @@ public abstract class AbstractAwsTestCase extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { - Settings.Builder settings = Settings.builder() + Settings.Builder settings = Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .extendArray("plugin.types", S3RepositoryPlugin.class.getName(), TestAwsS3Service.TestPlugin.class.getName()) .put("cloud.aws.test.random", randomInt()) .put("cloud.aws.test.write_failures", 0.1) .put("cloud.aws.test.read_failures", 0.1); @@ -52,11 +54,16 @@ public abstract class AbstractAwsTestCase extends ESIntegTestCase { if (Strings.hasText(System.getProperty("tests.config"))) { settings.loadFromPath(PathUtils.get(System.getProperty("tests.config"))); } else { - throw new IllegalStateException("to run integration tests, you need to set -Dtest.thirdparty=true and -Dtests.config=/path/to/elasticsearch.yml"); + throw new IllegalStateException("to run integration tests, you need to set -Dtests.thirdparty=true and -Dtests.config=/path/to/elasticsearch.yml"); } } catch (SettingsException exception) { throw new IllegalStateException("your test configuration file is incorrect: " + System.getProperty("tests.config"), exception); } return settings.build(); } + + @Override + protected Collection> nodePlugins() { + return pluginList(S3RepositoryPlugin.class); + } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/RepositoryS3SettingsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/RepositoryS3SettingsTests.java new file mode 100644 index 00000000000..7d881e0dd30 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/RepositoryS3SettingsTests.java @@ -0,0 +1,302 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.aws; + +import com.amazonaws.Protocol; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.RepositorySettings; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.repositories.s3.S3Repository.Repositories; +import static org.elasticsearch.repositories.s3.S3Repository.Repository; +import static org.elasticsearch.repositories.s3.S3Repository.getValue; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isEmptyString; + +public class RepositoryS3SettingsTests extends ESTestCase { + + private static final Settings AWS = Settings.builder() + .put(AwsS3Service.KEY_SETTING.getKey(), "global-key") + .put(AwsS3Service.SECRET_SETTING.getKey(), "global-secret") + .put(AwsS3Service.PROTOCOL_SETTING.getKey(), "https") + .put(AwsS3Service.PROXY_HOST_SETTING.getKey(), "global-proxy-host") + .put(AwsS3Service.PROXY_PORT_SETTING.getKey(), 10000) + .put(AwsS3Service.PROXY_USERNAME_SETTING.getKey(), "global-proxy-username") + .put(AwsS3Service.PROXY_PASSWORD_SETTING.getKey(), "global-proxy-password") + .put(AwsS3Service.SIGNER_SETTING.getKey(), "global-signer") + .put(AwsS3Service.REGION_SETTING.getKey(), "global-region") + .build(); + + private static final Settings S3 = Settings.builder() + .put(AwsS3Service.CLOUD_S3.KEY_SETTING.getKey(), "s3-key") + .put(AwsS3Service.CLOUD_S3.SECRET_SETTING.getKey(), "s3-secret") + .put(AwsS3Service.CLOUD_S3.PROTOCOL_SETTING.getKey(), "http") + .put(AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING.getKey(), "s3-proxy-host") + .put(AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING.getKey(), 20000) + .put(AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING.getKey(), "s3-proxy-username") + .put(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.getKey(), "s3-proxy-password") + .put(AwsS3Service.CLOUD_S3.SIGNER_SETTING.getKey(), "s3-signer") + .put(AwsS3Service.CLOUD_S3.REGION_SETTING.getKey(), "s3-region") + .put(AwsS3Service.CLOUD_S3.ENDPOINT_SETTING.getKey(), "s3-endpoint") + .build(); + + private static final Settings REPOSITORIES = Settings.builder() + .put(Repositories.KEY_SETTING.getKey(), "repositories-key") + .put(Repositories.SECRET_SETTING.getKey(), "repositories-secret") + .put(Repositories.BUCKET_SETTING.getKey(), "repositories-bucket") + .put(Repositories.PROTOCOL_SETTING.getKey(), "https") + .put(Repositories.REGION_SETTING.getKey(), "repositories-region") + .put(Repositories.ENDPOINT_SETTING.getKey(), "repositories-endpoint") + .put(Repositories.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true) + .put(Repositories.BUFFER_SIZE_SETTING.getKey(), "6mb") + .put(Repositories.MAX_RETRIES_SETTING.getKey(), 4) + .put(Repositories.CHUNK_SIZE_SETTING.getKey(), "110mb") + .put(Repositories.COMPRESS_SETTING.getKey(), true) + .put(Repositories.STORAGE_CLASS_SETTING.getKey(), "repositories-class") + .put(Repositories.CANNED_ACL_SETTING.getKey(), "repositories-acl") + .put(Repositories.BASE_PATH_SETTING.getKey(), "repositories-basepath") + .build(); + + private static final Settings REPOSITORY = Settings.builder() + .put(Repository.KEY_SETTING.getKey(), "repository-key") + .put(Repository.SECRET_SETTING.getKey(), "repository-secret") + .put(Repository.BUCKET_SETTING.getKey(), "repository-bucket") + .put(Repository.PROTOCOL_SETTING.getKey(), "https") + .put(Repository.REGION_SETTING.getKey(), "repository-region") + .put(Repository.ENDPOINT_SETTING.getKey(), "repository-endpoint") + .put(Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), false) + 
.put(Repository.BUFFER_SIZE_SETTING.getKey(), "7mb") + .put(Repository.MAX_RETRIES_SETTING.getKey(), 5) + .put(Repository.CHUNK_SIZE_SETTING.getKey(), "120mb") + .put(Repository.COMPRESS_SETTING.getKey(), false) + .put(Repository.STORAGE_CLASS_SETTING.getKey(), "repository-class") + .put(Repository.CANNED_ACL_SETTING.getKey(), "repository-acl") + .put(Repository.BASE_PATH_SETTING.getKey(), "repository-basepath") + .build(); + + /** + * We test when only cloud.aws settings are set + */ + public void testRepositorySettingsGlobalOnly() { + Settings nodeSettings = buildSettings(AWS); + RepositorySettings repositorySettings = new RepositorySettings(nodeSettings, Settings.EMPTY); + assertThat(getValue(repositorySettings, Repository.KEY_SETTING, Repositories.KEY_SETTING), is("global-key")); + assertThat(getValue(repositorySettings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING), is("global-secret")); + assertThat(getValue(repositorySettings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING), isEmptyString()); + assertThat(getValue(repositorySettings, Repository.PROTOCOL_SETTING, Repositories.PROTOCOL_SETTING), is(Protocol.HTTPS)); + assertThat(getValue(repositorySettings, Repository.REGION_SETTING, Repositories.REGION_SETTING), is("global-region")); + assertThat(getValue(repositorySettings, Repository.ENDPOINT_SETTING, Repositories.ENDPOINT_SETTING), isEmptyString()); + assertThat(AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING.get(nodeSettings), is("global-proxy-host")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING.get(nodeSettings), is(10000)); + assertThat(AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING.get(nodeSettings), is("global-proxy-username")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.get(nodeSettings), is("global-proxy-password")); + assertThat(AwsS3Service.CLOUD_S3.SIGNER_SETTING.get(nodeSettings), is("global-signer")); + assertThat(getValue(repositorySettings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING), + is(false)); + assertThat(getValue(repositorySettings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING).getMb(), is(5L)); + assertThat(getValue(repositorySettings, Repository.MAX_RETRIES_SETTING, Repositories.MAX_RETRIES_SETTING), is(3)); + assertThat(getValue(repositorySettings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING).getMb(), is(100L)); + assertThat(getValue(repositorySettings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING), is(false)); + assertThat(getValue(repositorySettings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING), isEmptyString()); + assertThat(getValue(repositorySettings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING), isEmptyString()); + assertThat(getValue(repositorySettings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING), isEmptyString()); + } + + /** + * We test when cloud.aws settings are overloaded by cloud.aws.s3 settings + */ + public void testRepositorySettingsGlobalOverloadedByS3() { + Settings nodeSettings = buildSettings(AWS, S3); + RepositorySettings repositorySettings = new RepositorySettings(nodeSettings, Settings.EMPTY); + assertThat(getValue(repositorySettings, Repository.KEY_SETTING, Repositories.KEY_SETTING), is("s3-key")); + assertThat(getValue(repositorySettings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING), is("s3-secret")); + assertThat(getValue(repositorySettings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING), isEmptyString()); + 
assertThat(getValue(repositorySettings, Repository.PROTOCOL_SETTING, Repositories.PROTOCOL_SETTING), is(Protocol.HTTP)); + assertThat(getValue(repositorySettings, Repository.REGION_SETTING, Repositories.REGION_SETTING), is("s3-region")); + assertThat(getValue(repositorySettings, Repository.ENDPOINT_SETTING, Repositories.ENDPOINT_SETTING), is("s3-endpoint")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING.get(nodeSettings), is("s3-proxy-host")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING.get(nodeSettings), is(20000)); + assertThat(AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING.get(nodeSettings), is("s3-proxy-username")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.get(nodeSettings), is("s3-proxy-password")); + assertThat(AwsS3Service.CLOUD_S3.SIGNER_SETTING.get(nodeSettings), is("s3-signer")); + assertThat(getValue(repositorySettings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING), + is(false)); + assertThat(getValue(repositorySettings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING).getMb(), is(5L)); + assertThat(getValue(repositorySettings, Repository.MAX_RETRIES_SETTING, Repositories.MAX_RETRIES_SETTING), is(3)); + assertThat(getValue(repositorySettings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING).getMb(), is(100L)); + assertThat(getValue(repositorySettings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING), is(false)); + assertThat(getValue(repositorySettings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING), isEmptyString()); + assertThat(getValue(repositorySettings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING), isEmptyString()); + assertThat(getValue(repositorySettings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING), isEmptyString()); + } + + /** + * We test when cloud.aws settings are overloaded by repositories.s3 settings + */ + public void testRepositorySettingsGlobalOverloadedByRepositories() { + Settings nodeSettings = buildSettings(AWS, REPOSITORIES); + RepositorySettings repositorySettings = new RepositorySettings(nodeSettings, Settings.EMPTY); + assertThat(getValue(repositorySettings, Repository.KEY_SETTING, Repositories.KEY_SETTING), is("repositories-key")); + assertThat(getValue(repositorySettings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING), is("repositories-secret")); + assertThat(getValue(repositorySettings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING), is("repositories-bucket")); + assertThat(getValue(repositorySettings, Repository.PROTOCOL_SETTING, Repositories.PROTOCOL_SETTING), is(Protocol.HTTPS)); + assertThat(getValue(repositorySettings, Repository.REGION_SETTING, Repositories.REGION_SETTING), is("repositories-region")); + assertThat(getValue(repositorySettings, Repository.ENDPOINT_SETTING, Repositories.ENDPOINT_SETTING), is("repositories-endpoint")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING.get(nodeSettings), is("global-proxy-host")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING.get(nodeSettings), is(10000)); + assertThat(AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING.get(nodeSettings), is("global-proxy-username")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.get(nodeSettings), is("global-proxy-password")); + assertThat(AwsS3Service.CLOUD_S3.SIGNER_SETTING.get(nodeSettings), is("global-signer")); + assertThat(getValue(repositorySettings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING), + is(true)); 
+ assertThat(getValue(repositorySettings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING).getMb(), is(6L)); + assertThat(getValue(repositorySettings, Repository.MAX_RETRIES_SETTING, Repositories.MAX_RETRIES_SETTING), is(4)); + assertThat(getValue(repositorySettings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING).getMb(), is(110L)); + assertThat(getValue(repositorySettings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING), is(true)); + assertThat(getValue(repositorySettings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING), + is("repositories-class")); + assertThat(getValue(repositorySettings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING), is("repositories-acl")); + assertThat(getValue(repositorySettings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING), is("repositories-basepath")); + } + + /** + * We test when cloud.aws.s3 settings are overloaded by repositories.s3 settings + */ + public void testRepositorySettingsS3OverloadedByRepositories() { + Settings nodeSettings = buildSettings(AWS, S3, REPOSITORIES); + RepositorySettings repositorySettings = new RepositorySettings(nodeSettings, Settings.EMPTY); + assertThat(getValue(repositorySettings, Repository.KEY_SETTING, Repositories.KEY_SETTING), is("repositories-key")); + assertThat(getValue(repositorySettings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING), is("repositories-secret")); + assertThat(getValue(repositorySettings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING), is("repositories-bucket")); + assertThat(getValue(repositorySettings, Repository.PROTOCOL_SETTING, Repositories.PROTOCOL_SETTING), is(Protocol.HTTPS)); + assertThat(getValue(repositorySettings, Repository.REGION_SETTING, Repositories.REGION_SETTING), is("repositories-region")); + assertThat(getValue(repositorySettings, Repository.ENDPOINT_SETTING, Repositories.ENDPOINT_SETTING), is("repositories-endpoint")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING.get(nodeSettings), is("s3-proxy-host")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING.get(nodeSettings), is(20000)); + assertThat(AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING.get(nodeSettings), is("s3-proxy-username")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.get(nodeSettings), is("s3-proxy-password")); + assertThat(AwsS3Service.CLOUD_S3.SIGNER_SETTING.get(nodeSettings), is("s3-signer")); + assertThat(getValue(repositorySettings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING), + is(true)); + assertThat(getValue(repositorySettings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING).getMb(), is(6L)); + assertThat(getValue(repositorySettings, Repository.MAX_RETRIES_SETTING, Repositories.MAX_RETRIES_SETTING), is(4)); + assertThat(getValue(repositorySettings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING).getMb(), is(110L)); + assertThat(getValue(repositorySettings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING), is(true)); + assertThat(getValue(repositorySettings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING), + is("repositories-class")); + assertThat(getValue(repositorySettings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING), is("repositories-acl")); + assertThat(getValue(repositorySettings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING), is("repositories-basepath")); + } + + /** + * We test when cloud.aws settings are overloaded by 
single repository settings + */ + public void testRepositorySettingsGlobalOverloadedByRepository() { + Settings nodeSettings = buildSettings(AWS); + RepositorySettings repositorySettings = new RepositorySettings(nodeSettings, REPOSITORY); + assertThat(getValue(repositorySettings, Repository.KEY_SETTING, Repositories.KEY_SETTING), is("repository-key")); + assertThat(getValue(repositorySettings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING), is("repository-secret")); + assertThat(getValue(repositorySettings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING), is("repository-bucket")); + assertThat(getValue(repositorySettings, Repository.PROTOCOL_SETTING, Repositories.PROTOCOL_SETTING), is(Protocol.HTTPS)); + assertThat(getValue(repositorySettings, Repository.REGION_SETTING, Repositories.REGION_SETTING), is("repository-region")); + assertThat(getValue(repositorySettings, Repository.ENDPOINT_SETTING, Repositories.ENDPOINT_SETTING), is("repository-endpoint")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING.get(nodeSettings), is("global-proxy-host")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING.get(nodeSettings), is(10000)); + assertThat(AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING.get(nodeSettings), is("global-proxy-username")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.get(nodeSettings), is("global-proxy-password")); + assertThat(AwsS3Service.CLOUD_S3.SIGNER_SETTING.get(nodeSettings), is("global-signer")); + assertThat(getValue(repositorySettings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING), + is(false)); + assertThat(getValue(repositorySettings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING).getMb(), is(7L)); + assertThat(getValue(repositorySettings, Repository.MAX_RETRIES_SETTING, Repositories.MAX_RETRIES_SETTING), is(5)); + assertThat(getValue(repositorySettings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING).getMb(), is(120L)); + assertThat(getValue(repositorySettings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING), is(false)); + assertThat(getValue(repositorySettings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING), + is("repository-class")); + assertThat(getValue(repositorySettings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING), is("repository-acl")); + assertThat(getValue(repositorySettings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING), is("repository-basepath")); + } + + /** + * We test when cloud.aws.s3 settings are overloaded by single repository settings + */ + public void testRepositorySettingsS3OverloadedByRepository() { + Settings nodeSettings = buildSettings(AWS, S3); + RepositorySettings repositorySettings = new RepositorySettings(nodeSettings, REPOSITORY); + assertThat(getValue(repositorySettings, Repository.KEY_SETTING, Repositories.KEY_SETTING), is("repository-key")); + assertThat(getValue(repositorySettings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING), is("repository-secret")); + assertThat(getValue(repositorySettings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING), is("repository-bucket")); + assertThat(getValue(repositorySettings, Repository.PROTOCOL_SETTING, Repositories.PROTOCOL_SETTING), is(Protocol.HTTPS)); + assertThat(getValue(repositorySettings, Repository.REGION_SETTING, Repositories.REGION_SETTING), is("repository-region")); + assertThat(getValue(repositorySettings, Repository.ENDPOINT_SETTING, Repositories.ENDPOINT_SETTING), 
is("repository-endpoint")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING.get(nodeSettings), is("s3-proxy-host")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING.get(nodeSettings), is(20000)); + assertThat(AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING.get(nodeSettings), is("s3-proxy-username")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.get(nodeSettings), is("s3-proxy-password")); + assertThat(AwsS3Service.CLOUD_S3.SIGNER_SETTING.get(nodeSettings), is("s3-signer")); + assertThat(getValue(repositorySettings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING), + is(false)); + assertThat(getValue(repositorySettings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING).getMb(), is(7L)); + assertThat(getValue(repositorySettings, Repository.MAX_RETRIES_SETTING, Repositories.MAX_RETRIES_SETTING), is(5)); + assertThat(getValue(repositorySettings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING).getMb(), is(120L)); + assertThat(getValue(repositorySettings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING), is(false)); + assertThat(getValue(repositorySettings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING), + is("repository-class")); + assertThat(getValue(repositorySettings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING), is("repository-acl")); + assertThat(getValue(repositorySettings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING), is("repository-basepath")); + } + + /** + * We test when repositories settings are overloaded by single repository settings + */ + public void testRepositorySettingsRepositoriesOverloadedByRepository() { + Settings nodeSettings = buildSettings(AWS, S3, REPOSITORIES); + RepositorySettings repositorySettings = new RepositorySettings(nodeSettings, REPOSITORY); + assertThat(getValue(repositorySettings, Repository.KEY_SETTING, Repositories.KEY_SETTING), is("repository-key")); + assertThat(getValue(repositorySettings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING), is("repository-secret")); + assertThat(getValue(repositorySettings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING), is("repository-bucket")); + assertThat(getValue(repositorySettings, Repository.PROTOCOL_SETTING, Repositories.PROTOCOL_SETTING), is(Protocol.HTTPS)); + assertThat(getValue(repositorySettings, Repository.REGION_SETTING, Repositories.REGION_SETTING), is("repository-region")); + assertThat(getValue(repositorySettings, Repository.ENDPOINT_SETTING, Repositories.ENDPOINT_SETTING), is("repository-endpoint")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING.get(nodeSettings), is("s3-proxy-host")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING.get(nodeSettings), is(20000)); + assertThat(AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING.get(nodeSettings), is("s3-proxy-username")); + assertThat(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.get(nodeSettings), is("s3-proxy-password")); + assertThat(AwsS3Service.CLOUD_S3.SIGNER_SETTING.get(nodeSettings), is("s3-signer")); + assertThat(getValue(repositorySettings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING), + is(false)); + assertThat(getValue(repositorySettings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING).getMb(), is(7L)); + assertThat(getValue(repositorySettings, Repository.MAX_RETRIES_SETTING, Repositories.MAX_RETRIES_SETTING), is(5)); + assertThat(getValue(repositorySettings, Repository.CHUNK_SIZE_SETTING, 
Repositories.CHUNK_SIZE_SETTING).getMb(), is(120L)); + assertThat(getValue(repositorySettings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING), is(false)); + assertThat(getValue(repositorySettings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING), + is("repository-class")); + assertThat(getValue(repositorySettings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING), is("repository-acl")); + assertThat(getValue(repositorySettings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING), is("repository-basepath")); + } + + private Settings buildSettings(Settings... global) { + Settings.Builder builder = Settings.builder(); + for (Settings settings : global) { + builder.put(settings); + } + return builder.build(); + } +} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java index da2fcd2b4d3..47e884d73bd 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java @@ -18,11 +18,11 @@ */ package org.elasticsearch.cloud.aws; +import com.amazonaws.Protocol; import com.amazonaws.services.s3.AmazonS3; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.plugins.Plugin; import java.util.IdentityHashMap; @@ -51,17 +51,7 @@ public class TestAwsS3Service extends InternalAwsS3Service { @Override - public synchronized AmazonS3 client() { - return cachedWrapper(super.client()); - } - - @Override - public synchronized AmazonS3 client(String endpoint, String protocol, String region, String account, String key) { - return cachedWrapper(super.client(endpoint, protocol, region, account, key)); - } - - @Override - public synchronized AmazonS3 client(String endpoint, String protocol, String region, String account, String key, Integer maxRetries) { + public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries) { return cachedWrapper(super.client(endpoint, protocol, region, account, key, maxRetries)); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java index 151daaab2a8..8cc53d669b3 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories.s3; +import com.amazonaws.Protocol; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.ObjectListing; @@ -32,14 +33,12 @@ import org.elasticsearch.cloud.aws.AbstractAwsTestCase; import org.elasticsearch.cloud.aws.AwsS3Service; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugin.repository.s3.S3RepositoryPlugin; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryVerificationException; import 
org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.store.MockFSDirectoryService; import org.junit.After; import org.junit.Before; @@ -54,43 +53,43 @@ import static org.hamcrest.Matchers.notNullValue; */ @ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0, transportClientRatio = 0.0) abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase { + @Override - public Settings indexSettings() { - // During restore we frequently restore index to exactly the same state it was before, that might cause the same - // checksum file to be written twice during restore operation - return Settings.builder().put(super.indexSettings()) - .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE_SETTING.getKey(), false) - .put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE_SETTING.getKey(), false) - .put("cloud.enabled", true) - .put("plugin.types", S3RepositoryPlugin.class.getName()) - .put("repositories.s3.base_path", basePath) + public Settings nodeSettings(int nodeOrdinal) { + // nodeSettings is called before `wipeBefore()` so we need to define basePath here + globalBasePath = "repo-" + randomInt(); + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(S3Repository.Repositories.BASE_PATH_SETTING.getKey(), globalBasePath) .build(); } private String basePath; + private String globalBasePath; @Before public final void wipeBefore() { wipeRepositories(); basePath = "repo-" + randomInt(); cleanRepositoryFiles(basePath); + cleanRepositoryFiles(globalBasePath); } @After public final void wipeAfter() { wipeRepositories(); cleanRepositoryFiles(basePath); + cleanRepositoryFiles(globalBasePath); } @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211") public void testSimpleWorkflow() { Client client = client(); Settings.Builder settings = Settings.settingsBuilder() - .put("chunk_size", randomIntBetween(1000, 10000)); + .put(S3Repository.Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000)); // We sometime test getting the base_path from node settings using repositories.s3.base_path if (usually()) { - settings.put("base_path", basePath); + settings.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath); } logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath); @@ -166,9 +165,9 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("s3").setSettings(Settings.settingsBuilder() - .put("base_path", basePath) - .put("chunk_size", randomIntBetween(1000, 10000)) - .put("server_side_encryption", true) + .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) + .put(S3Repository.Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000)) + .put(S3Repository.Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -196,11 +195,12 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase 
Settings settings = internalCluster().getInstance(Settings.class); Settings bucket = settings.getByPrefix("repositories.s3."); AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client( - null, - null, - bucket.get("region", settings.get("repositories.s3.region")), - bucket.get("access_key", settings.get("cloud.aws.access_key")), - bucket.get("secret_key", settings.get("cloud.aws.secret_key"))); + null, + S3Repository.Repositories.PROTOCOL_SETTING.get(settings), + S3Repository.Repositories.REGION_SETTING.get(settings), + S3Repository.Repositories.KEY_SETTING.get(settings), + S3Repository.Repositories.SECRET_SETTING.get(settings), + null); String bucketName = bucket.get("bucket"); logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath); @@ -260,26 +260,37 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase try { client.admin().cluster().preparePutRepository("test-repo") .setType("s3").setSettings(Settings.settingsBuilder() - .put("base_path", basePath) - .put("bucket", bucketSettings.get("bucket")) + .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) + .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) ).get(); fail("repository verification should have raise an exception!"); } catch (RepositoryVerificationException e) { } } + public void testRepositoryWithBasePath() { + Client client = client(); + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") + .setType("s3").setSettings(Settings.settingsBuilder() + .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + + assertRepositoryIsOperational(client, "test-repo"); + } + public void testRepositoryWithCustomCredentials() { Client client = client(); Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.private-bucket."); logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("s3").setSettings(Settings.settingsBuilder() - .put("base_path", basePath) - .put("region", bucketSettings.get("region")) - .put("access_key", bucketSettings.get("access_key")) - .put("secret_key", bucketSettings.get("secret_key")) - .put("bucket", bucketSettings.get("bucket")) - ).get(); + .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) + .put(S3Repository.Repository.REGION_SETTING.getKey(), bucketSettings.get("region")) + .put(S3Repository.Repository.KEY_SETTING.getKey(), bucketSettings.get("access_key")) + .put(S3Repository.Repository.SECRET_SETTING.getKey(), bucketSettings.get("secret_key")) + .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) + ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); assertRepositoryIsOperational(client, "test-repo"); @@ -292,12 +303,12 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase logger.info("--> creating s3 repostoriy with endpoint [{}], bucket[{}] and path [{}]", bucketSettings.get("endpoint"), bucketSettings.get("bucket"), basePath); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("s3").setSettings(Settings.settingsBuilder() - .put("bucket", bucketSettings.get("bucket")) - 
.put("endpoint", bucketSettings.get("endpoint")) - .put("access_key", bucketSettings.get("access_key")) - .put("secret_key", bucketSettings.get("secret_key")) - .put("base_path", basePath) - ).get(); + .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) + .put(S3Repository.Repository.ENDPOINT_SETTING.getKey(), bucketSettings.get("endpoint")) + .put(S3Repository.Repository.KEY_SETTING.getKey(), bucketSettings.get("access_key")) + .put(S3Repository.Repository.SECRET_SETTING.getKey(), bucketSettings.get("secret_key")) + .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) + ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); assertRepositoryIsOperational(client, "test-repo"); } @@ -313,8 +324,8 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase try { client.admin().cluster().preparePutRepository("test-repo") .setType("s3").setSettings(Settings.settingsBuilder() - .put("base_path", basePath) - .put("bucket", bucketSettings.get("bucket")) + .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) + .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) // Below setting intentionally omitted to assert bucket is not available in default region. // .put("region", privateBucketSettings.get("region")) ).get(); @@ -331,10 +342,10 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("s3").setSettings(Settings.settingsBuilder() - .put("base_path", basePath) - .put("bucket", bucketSettings.get("bucket")) - .put("region", bucketSettings.get("region")) - ).get(); + .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) + .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) + .put(S3Repository.Repository.REGION_SETTING.getKey(), bucketSettings.get("region")) + ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); assertRepositoryIsOperational(client, "test-repo"); @@ -348,7 +359,7 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("s3").setSettings(Settings.settingsBuilder() - .put("base_path", basePath) + .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -369,8 +380,8 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase logger.info("--> creating s3 repository without any path"); PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo") .setType("s3").setSettings(Settings.settingsBuilder() - .put("base_path", basePath) - ).get(); + .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) + ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); try { @@ -454,17 +465,17 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase settings.getByPrefix("repositories.s3.external-bucket.") }; for (Settings bucket : buckets) { - String endpoint = 
bucket.get("endpoint", settings.get("repositories.s3.endpoint")); - String protocol = bucket.get("protocol", settings.get("repositories.s3.protocol")); - String region = bucket.get("region", settings.get("repositories.s3.region")); - String accessKey = bucket.get("access_key", settings.get("cloud.aws.access_key")); - String secretKey = bucket.get("secret_key", settings.get("cloud.aws.secret_key")); + String endpoint = bucket.get("endpoint", S3Repository.Repositories.ENDPOINT_SETTING.get(settings)); + Protocol protocol = S3Repository.Repositories.PROTOCOL_SETTING.get(settings); + String region = bucket.get("region", S3Repository.Repositories.REGION_SETTING.get(settings)); + String accessKey = bucket.get("access_key", S3Repository.Repositories.KEY_SETTING.get(settings)); + String secretKey = bucket.get("secret_key", S3Repository.Repositories.SECRET_SETTING.get(settings)); String bucketName = bucket.get("bucket"); // We check that settings has been set in elasticsearch.yml integration test file // as described in README assertThat("Your settings in elasticsearch.yml are incorrects. Check README file.", bucketName, notNullValue()); - AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(endpoint, protocol, region, accessKey, secretKey); + AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(endpoint, protocol, region, accessKey, secretKey, null); try { ObjectListing prevListing = null; //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html From de7b152736dc30fd47bb4f8cc1d7b5bc61afc498 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 11 Feb 2016 12:09:30 +0100 Subject: [PATCH 07/22] Fix test --- .../java/org/elasticsearch/index/IndexModuleTests.java | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 01cd056ed8e..55c0c85a889 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -88,7 +88,7 @@ public class IndexModuleTests extends ESTestCase { private Environment environment; private NodeEnvironment nodeEnvironment; private NodeServicesProvider nodeServicesProvider; - private IndicesQueryCache indicesQueryCache = new IndicesQueryCache(settings); + private IndicesQueryCache indicesQueryCache; private IndexService.ShardStoreDeleter deleter = new IndexService.ShardStoreDeleter() { @Override @@ -122,6 +122,7 @@ public class IndexModuleTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); + indicesQueryCache = new IndicesQueryCache(settings); indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); index = indexSettings.getIndex(); environment = new Environment(settings); @@ -134,10 +135,8 @@ public class IndexModuleTests extends ESTestCase { public void tearDown() throws Exception { super.tearDown(); nodeEnvironment.close(); - nodeServicesProvider.getThreadPool().shutdown(); - if (nodeServicesProvider.getThreadPool().awaitTermination(10, TimeUnit.SECONDS) == false) { - nodeServicesProvider.getThreadPool().shutdownNow(); - } + indicesQueryCache.close(); + ThreadPool.terminate(nodeServicesProvider.getThreadPool(), 10, TimeUnit.SECONDS); } public 
void testWrapperIsBound() throws IOException { From 94f19d7e3761b86a443c9303cc7d5192b8bc3465 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 9 Feb 2016 11:59:10 +0100 Subject: [PATCH 08/22] Reuse existing allocation id for primary shard allocation Closes #16530 --- .../cluster/routing/AllocationId.java | 7 + .../cluster/routing/RoutingNodes.java | 15 +- .../cluster/routing/ShardRouting.java | 10 +- .../allocator/BalancedShardsAllocator.java | 4 +- .../AbstractAllocateAllocationCommand.java | 2 +- .../gateway/PrimaryShardAllocator.java | 162 +++++++++--------- .../gateway/ReplicaShardAllocator.java | 2 +- ...ransportNodesListGatewayStartedShards.java | 23 +++ .../cluster/routing/AllocationIdTests.java | 10 +- .../routing/RandomShardRoutingMutator.java | 2 +- .../cluster/routing/ShardRoutingHelper.java | 6 +- .../cluster/routing/ShardRoutingTests.java | 2 +- .../cluster/routing/UnassignedInfoTests.java | 2 +- .../allocation/BalanceConfigurationTests.java | 20 +-- .../gateway/PrimaryShardAllocatorTests.java | 7 +- 15 files changed, 156 insertions(+), 118 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java b/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java index 528ed8b1c3f..a5e96e60e64 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java @@ -96,6 +96,13 @@ public class AllocationId implements ToXContent { return new AllocationId(Strings.randomBase64UUID(), null); } + /** + * Creates a new allocation id for initializing allocation based on an existing id. + */ + public static AllocationId newInitializing(String existingAllocationId) { + return new AllocationId(existingAllocationId, null); + } + /** * Creates a new allocation id for the target initializing shard that is the result * of a relocation. diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 02bcea4ff2d..c1a5f3ff208 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.index.Index; @@ -420,11 +421,13 @@ public class RoutingNodes implements Iterable { /** * Moves a shard from unassigned to initialize state + * + * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated. */ - public void initialize(ShardRouting shard, String nodeId, long expectedSize) { + public void initialize(ShardRouting shard, String nodeId, @Nullable String existingAllocationId, long expectedSize) { ensureMutable(); assert shard.unassigned() : shard; - shard.initialize(nodeId, expectedSize); + shard.initialize(nodeId, existingAllocationId, expectedSize); node(nodeId).add(shard); inactiveShardCount++; if (shard.primary()) { @@ -692,10 +695,12 @@ public class RoutingNodes implements Iterable { /** * Initializes the current unassigned shard and moves it from the unassigned list. + * + * @param existingAllocationId allocation id to use. 
If null, a fresh allocation id is generated. */ - public void initialize(String nodeId, long expectedShardSize) { + public void initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) { innerRemove(); - nodes.initialize(new ShardRouting(current), nodeId, expectedShardSize); + nodes.initialize(new ShardRouting(current), nodeId, existingAllocationId, expectedShardSize); } /** @@ -711,7 +716,7 @@ public class RoutingNodes implements Iterable { /** * Unsupported operation, just there for the interface. Use {@link #removeAndIgnore()} or - * {@link #initialize(String, long)}. + * {@link #initialize(String, String, long)}. */ @Override public void remove() { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 7535aa1226e..336b6547de4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -410,14 +410,20 @@ public final class ShardRouting implements Streamable, ToXContent { /** * Initializes an unassigned shard on a node. + * + * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated. */ - void initialize(String nodeId, long expectedShardSize) { + void initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) { ensureNotFrozen(); assert state == ShardRoutingState.UNASSIGNED : this; assert relocatingNodeId == null : this; state = ShardRoutingState.INITIALIZING; currentNodeId = nodeId; - allocationId = AllocationId.newInitializing(); + if (existingAllocationId == null) { + allocationId = AllocationId.newInitializing(); + } else { + allocationId = AllocationId.newInitializing(existingAllocationId); + } this.expectedShardSize = expectedShardSize; } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 15c303a2f70..e12020cfa74 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -702,7 +702,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards if (logger.isTraceEnabled()) { logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); } - routingNodes.initialize(shard, minNode.getNodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); + routingNodes.initialize(shard, minNode.getNodeId(), null, allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); changed = true; continue; // don't add to ignoreUnassigned } else { @@ -790,7 +790,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); } else { - routingNodes.initialize(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); + routingNodes.initialize(candidate, minNode.getNodeId(), null, allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); } return true; diff --git 
a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java index 5a13b3b9683..3a89507871e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java @@ -242,7 +242,7 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom if (shardRoutingChanges != null) { shardRoutingChanges.accept(unassigned); } - it.initialize(routingNode.nodeId(), allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); + it.initialize(routingNode.nodeId(), null, allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); return; } assert false : "shard to initialize not found in list of unassigned shards"; diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 012b33d7571..8809f68853b 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -32,15 +32,14 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.elasticsearch.index.shard.ShardStateMetaData; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; -import java.util.HashMap; import java.util.LinkedList; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -98,7 +97,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { continue; } - final AsyncShardFetch.FetchResult shardState = fetchData(shard, allocation); + final AsyncShardFetch.FetchResult shardState = fetchData(shard, allocation); if (shardState.hasData() == false) { logger.trace("{}: ignoring allocation, still fetching shard started state", shard); allocation.setHasPendingAsyncFetch(); @@ -110,7 +109,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { final boolean snapshotRestore = shard.restoreSource() != null; final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData); - final NodesResult nodesResult; + final NodeShardsResult nodeShardsResult; final boolean enoughAllocationsFound; if (lastActiveAllocationIds.isEmpty()) { @@ -118,20 +117,20 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { // when we load an old index (after upgrading cluster) or restore a snapshot of an old index // fall back to old version-based allocation mode // Note that once the shard has been active, lastActiveAllocationIds will be non-empty - nodesResult = buildVersionBasedNodes(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState); + nodeShardsResult = buildVersionBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState); if (snapshotRestore || recoverOnAnyNode) { - 
enoughAllocationsFound = nodesResult.allocationsFound > 0; + enoughAllocationsFound = nodeShardsResult.allocationsFound > 0; } else { - enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(shard, indexMetaData, nodesResult); + enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult); } - logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_3_0_0, nodesResult.allocationsFound, shard); + logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_3_0_0, nodeShardsResult.allocationsFound, shard); } else { assert lastActiveAllocationIds.isEmpty() == false; // use allocation ids to select nodes - nodesResult = buildAllocationIdBasedNodes(shard, snapshotRestore || recoverOnAnyNode, + nodeShardsResult = buildAllocationIdBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), lastActiveAllocationIds, shardState); - enoughAllocationsFound = nodesResult.allocationsFound > 0; - logger.debug("[{}][{}]: found {} allocations of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodesResult.allocationsFound, shard, lastActiveAllocationIds); + enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0; + logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodeShardsResult.orderedAllocationCandidates.size(), shard, lastActiveAllocationIds); } if (enoughAllocationsFound == false){ @@ -144,25 +143,25 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } else { // we can't really allocate, so ignore it and continue unassignedIterator.removeAndIgnore(); - logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodesResult.allocationsFound); + logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodeShardsResult.allocationsFound); } continue; } - final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesResult.nodes); - if (nodesToAllocate.yesNodes.isEmpty() == false) { - DiscoveryNode node = nodesToAllocate.yesNodes.get(0); - logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node); + final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodeShardsResult.orderedAllocationCandidates); + if (nodesToAllocate.yesNodeShards.isEmpty() == false) { + NodeGatewayStartedShards nodeShardState = nodesToAllocate.yesNodeShards.get(0); + logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode()); changed = true; - unassignedIterator.initialize(node.id(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); - } else if (nodesToAllocate.throttleNodes.isEmpty() == true && nodesToAllocate.noNodes.isEmpty() == false) { - DiscoveryNode node = nodesToAllocate.noNodes.get(0); - logger.debug("[{}][{}]: forcing allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node); + unassignedIterator.initialize(nodeShardState.getNode().id(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); + } else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) { + NodeGatewayStartedShards 
nodeShardState = nodesToAllocate.noNodeShards.get(0); + logger.debug("[{}][{}]: forcing allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode()); changed = true; - unassignedIterator.initialize(node.id(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); + unassignedIterator.initialize(nodeShardState.getNode().id(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); } else { // we are throttling this, but we have enough to allocate to this node, ignore it for now - logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodes); + logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodeShards); unassignedIterator.removeAndIgnore(); } } @@ -174,11 +173,12 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { * lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but * entries with matching allocation id are always at the front of the list. */ - protected NodesResult buildAllocationIdBasedNodes(ShardRouting shard, boolean matchAnyShard, Set ignoreNodes, - Set lastActiveAllocationIds, AsyncShardFetch.FetchResult shardState) { - LinkedList matchingNodes = new LinkedList<>(); - LinkedList nonMatchingNodes = new LinkedList<>(); - for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) { + protected NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set ignoreNodes, + Set lastActiveAllocationIds, AsyncShardFetch.FetchResult shardState) { + LinkedList matchingNodeShardStates = new LinkedList<>(); + LinkedList nonMatchingNodeShardStates = new LinkedList<>(); + int numberOfAllocationsFound = 0; + for (NodeGatewayStartedShards nodeShardState : shardState.getData().values()) { DiscoveryNode node = nodeShardState.getNode(); String allocationId = nodeShardState.allocationId(); @@ -199,36 +199,37 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } if (allocationId != null) { + numberOfAllocationsFound++; if (lastActiveAllocationIds.contains(allocationId)) { if (nodeShardState.primary()) { - matchingNodes.addFirst(node); + matchingNodeShardStates.addFirst(nodeShardState); } else { - matchingNodes.addLast(node); + matchingNodeShardStates.addLast(nodeShardState); } } else if (matchAnyShard) { if (nodeShardState.primary()) { - nonMatchingNodes.addFirst(node); + nonMatchingNodeShardStates.addFirst(nodeShardState); } else { - nonMatchingNodes.addLast(node); + nonMatchingNodeShardStates.addLast(nodeShardState); } } } } - List nodes = new ArrayList<>(); - nodes.addAll(matchingNodes); - nodes.addAll(nonMatchingNodes); + List nodeShardStates = new ArrayList<>(); + nodeShardStates.addAll(matchingNodeShardStates); + nodeShardStates.addAll(nonMatchingNodeShardStates); if (logger.isTraceEnabled()) { - logger.trace("{} candidates for allocation: {}", shard, nodes.stream().map(DiscoveryNode::name).collect(Collectors.joining(", "))); + logger.trace("{} candidates for allocation: {}", shard, nodeShardStates.stream().map(s -> s.getNode().getName()).collect(Collectors.joining(", "))); } - return new NodesResult(nodes, nodes.size()); + return new NodeShardsResult(nodeShardStates, numberOfAllocationsFound); } /** * used by old version-based allocation */ - private boolean 
isEnoughVersionBasedAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesResult nodesAndVersions) { + private boolean isEnoughVersionBasedAllocationsFound(IndexMetaData indexMetaData, NodeShardsResult nodeShardsResult) { // check if the counts meets the minimum set int requiredAllocation = 1; // if we restore from a repository one copy is more then enough @@ -253,45 +254,44 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { requiredAllocation = Integer.parseInt(initialShards); } - return nodesAndVersions.allocationsFound >= requiredAllocation; + return nodeShardsResult.allocationsFound >= requiredAllocation; } /** - * Split the list of nodes to lists of yes/no/throttle nodes based on allocation deciders + * Split the list of node shard states into groups yes/no/throttle based on allocation deciders */ - private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, List nodes) { - List yesNodes = new ArrayList<>(); - List throttledNodes = new ArrayList<>(); - List noNodes = new ArrayList<>(); - for (DiscoveryNode discoNode : nodes) { - RoutingNode node = allocation.routingNodes().node(discoNode.id()); + private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, List nodeShardStates) { + List yesNodeShards = new ArrayList<>(); + List throttledNodeShards = new ArrayList<>(); + List noNodeShards = new ArrayList<>(); + for (NodeGatewayStartedShards nodeShardState : nodeShardStates) { + RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().id()); if (node == null) { continue; } Decision decision = allocation.deciders().canAllocate(shard, node, allocation); if (decision.type() == Decision.Type.THROTTLE) { - throttledNodes.add(discoNode); + throttledNodeShards.add(nodeShardState); } else if (decision.type() == Decision.Type.NO) { - noNodes.add(discoNode); + noNodeShards.add(nodeShardState); } else { - yesNodes.add(discoNode); + yesNodeShards.add(nodeShardState); } } - return new NodesToAllocate(Collections.unmodifiableList(yesNodes), Collections.unmodifiableList(throttledNodes), Collections.unmodifiableList(noNodes)); + return new NodesToAllocate(Collections.unmodifiableList(yesNodeShards), Collections.unmodifiableList(throttledNodeShards), Collections.unmodifiableList(noNodeShards)); } /** - * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have the highest shard version - * are added to the list. Otherwise, any node that has a shard is added to the list, but entries with highest - * version are always at the front of the list. + * Builds a list of previously started shards. If matchAnyShard is set to false, only shards with the highest shard version are added to + * the list. Otherwise, any existing shard is added to the list, but entries with highest version are always at the front of the list. 
*/ - NodesResult buildVersionBasedNodes(ShardRouting shard, boolean matchAnyShard, Set ignoreNodes, - AsyncShardFetch.FetchResult shardState) { - final Map nodesWithVersion = new HashMap<>(); + NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set ignoreNodes, + AsyncShardFetch.FetchResult shardState) { + final List allocationCandidates = new ArrayList<>(); int numberOfAllocationsFound = 0; long highestVersion = ShardStateMetaData.NO_VERSION; - for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) { + for (NodeGatewayStartedShards nodeShardState : shardState.getData().values()) { long version = nodeShardState.legacyVersion(); DiscoveryNode node = nodeShardState.getNode(); @@ -315,38 +315,29 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { if (version > highestVersion) { highestVersion = version; if (matchAnyShard == false) { - nodesWithVersion.clear(); + allocationCandidates.clear(); } - nodesWithVersion.put(node, version); + allocationCandidates.add(nodeShardState); } else if (version == highestVersion) { // If the candidate is the same, add it to the // list, but keep the current candidate - nodesWithVersion.put(node, version); + allocationCandidates.add(nodeShardState); } } } - // Now that we have a map of nodes to versions along with the - // number of allocations found (and not ignored), we need to sort - // it so the node with the highest version is at the beginning - List nodesWithHighestVersion = new ArrayList<>(); - nodesWithHighestVersion.addAll(nodesWithVersion.keySet()); - CollectionUtil.timSort(nodesWithHighestVersion, new Comparator() { - @Override - public int compare(DiscoveryNode o1, DiscoveryNode o2) { - return Long.compare(nodesWithVersion.get(o2), nodesWithVersion.get(o1)); - } - }); + // sort array so the node with the highest version is at the beginning + CollectionUtil.timSort(allocationCandidates, Comparator.comparing(NodeGatewayStartedShards::legacyVersion).reversed()); if (logger.isTraceEnabled()) { StringBuilder sb = new StringBuilder("["); - for (DiscoveryNode n : nodesWithVersion.keySet()) { - sb.append("[").append(n.getName()).append("]").append(" -> ").append(nodesWithVersion.get(n)).append(", "); + for (NodeGatewayStartedShards n : allocationCandidates) { + sb.append("[").append(n.getNode().getName()).append("]").append(" -> ").append(n.legacyVersion()).append(", "); } sb.append("]"); logger.trace("{} candidates for allocation: {}", shard, sb.toString()); } - return new NodesResult(Collections.unmodifiableList(nodesWithHighestVersion), numberOfAllocationsFound); + return new NodeShardsResult(Collections.unmodifiableList(allocationCandidates), numberOfAllocationsFound); } /** @@ -358,27 +349,28 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { && IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING.get(metaData.getSettings(), this.settings); } - protected abstract AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); + protected abstract AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); - static class NodesResult { - public final List nodes; + static class NodeShardsResult { + public final List orderedAllocationCandidates; public final int allocationsFound; - public NodesResult(List nodes, int allocationsFound) { - this.nodes = nodes; + public NodeShardsResult(List orderedAllocationCandidates, int allocationsFound) { + 
this.orderedAllocationCandidates = orderedAllocationCandidates; this.allocationsFound = allocationsFound; } } static class NodesToAllocate { - final List yesNodes; - final List throttleNodes; - final List noNodes; + final List yesNodeShards; + final List throttleNodeShards; + final List noNodeShards; - public NodesToAllocate(List yesNodes, List throttleNodes, List noNodes) { - this.yesNodes = yesNodes; - this.throttleNodes = throttleNodes; - this.noNodes = noNodes; + public NodesToAllocate(List yesNodeShards, List throttleNodeShards, + List noNodeShards) { + this.yesNodeShards = yesNodeShards; + this.throttleNodeShards = throttleNodeShards; + this.noNodeShards = noNodeShards; } } } diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index 2c25ce50365..e2b6f0d27ed 100644 --- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -173,7 +173,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node()); // we found a match changed = true; - unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); + unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), null, allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); } } else if (matchingNodes.hasAnyData() == false) { // if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation of the replica shard needs to be delayed diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 79e9c53a72e..03f8dc81703 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -335,5 +335,28 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction out.writeBoolean(false); } } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + NodeGatewayStartedShards that = (NodeGatewayStartedShards) o; + + if (legacyVersion != that.legacyVersion) return false; + if (primary != that.primary) return false; + if (allocationId != null ? !allocationId.equals(that.allocationId) : that.allocationId != null) return false; + return storeException != null ? storeException.equals(that.storeException) : that.storeException == null; + + } + + @Override + public int hashCode() { + int result = Long.hashCode(legacyVersion); + result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0); + result = 31 * result + (primary ? 1 : 0); + result = 31 * result + (storeException != null ? 
storeException.hashCode() : 0); + return result; + } } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java index 00acf1ebabc..53a0faf0705 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java @@ -42,7 +42,7 @@ public class AllocationIdTests extends ESTestCase { assertThat(shard.allocationId(), nullValue()); logger.info("-- initialize the shard"); - shard.initialize("node1", -1); + shard.initialize("node1", null, -1); AllocationId allocationId = shard.allocationId(); assertThat(allocationId, notNullValue()); assertThat(allocationId.getId(), notNullValue()); @@ -59,7 +59,7 @@ public class AllocationIdTests extends ESTestCase { public void testSuccessfulRelocation() { logger.info("-- build started shard"); ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); - shard.initialize("node1", -1); + shard.initialize("node1", null, -1); shard.moveToStarted(); AllocationId allocationId = shard.allocationId(); @@ -82,7 +82,7 @@ public class AllocationIdTests extends ESTestCase { public void testCancelRelocation() { logger.info("-- build started shard"); ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); - shard.initialize("node1", -1); + shard.initialize("node1", null, -1); shard.moveToStarted(); AllocationId allocationId = shard.allocationId(); @@ -102,7 +102,7 @@ public class AllocationIdTests extends ESTestCase { public void testMoveToUnassigned() { logger.info("-- build started shard"); ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); - shard.initialize("node1", -1); + shard.initialize("node1", null, -1); shard.moveToStarted(); logger.info("-- move to unassigned"); @@ -113,7 +113,7 @@ public class AllocationIdTests extends ESTestCase { public void testReinitializing() { logger.info("-- build started shard"); ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); - shard.initialize("node1", -1); + shard.initialize("node1", null, -1); shard.moveToStarted(); AllocationId allocationId = shard.allocationId(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java b/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java index 72ecc171eed..2b581a9d82e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java @@ -42,7 +42,7 @@ public final class RandomShardRoutingMutator { break; case 1: if (shardRouting.unassigned()) { - shardRouting.initialize(randomFrom(nodes), -1); + shardRouting.initialize(randomFrom(nodes), null, -1); } break; case 2: diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java index 5d3466b5e43..7299cbdf590 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java @@ -25,7 +25,7 @@ package org.elasticsearch.cluster.routing; public class ShardRoutingHelper { public static void relocate(ShardRouting routing, String nodeId) { - relocate(routing, nodeId, -1); + relocate(routing, nodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); } public static void relocate(ShardRouting routing, String nodeId, long expectedByteSize) { @@ -37,11 +37,11 @@ public class ShardRoutingHelper { } public static void initialize(ShardRouting routing, String nodeId) { - initialize(routing, nodeId, -1); + initialize(routing, nodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); } public static void initialize(ShardRouting routing, String nodeId, long expectedSize) { - routing.initialize(nodeId, expectedSize); + routing.initialize(nodeId, null, expectedSize); } public static void reinit(ShardRouting routing) { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java index dd38b0c7ea3..4e70761b169 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java @@ -254,7 +254,7 @@ public class ShardRoutingTests extends ESTestCase { } try { - routing.initialize("boom", -1); + routing.initialize("boom", null, -1); fail("must be frozen"); } catch (IllegalStateException ex) { // expected diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 95f69a768a8..ba73181ad97 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -187,7 +187,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { ShardRouting shard = TestShardRouting.newShardRouting("test", 1, null, null, null, true, ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); ShardRouting mutable = new ShardRouting(shard); assertThat(mutable.unassignedInfo(), notNullValue()); - mutable.initialize("test_node", -1); + mutable.initialize("test_node", null, -1); assertThat(mutable.state(), equalTo(ShardRoutingState.INITIALIZING)); assertThat(mutable.unassignedInfo(), notNullValue()); mutable.moveToStarted(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index 08cbdc09fe0..68706d96df7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -365,37 +365,37 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { switch (sr.id()) { case 0: if (sr.primary()) { - allocation.routingNodes().initialize(sr, "node1", -1); + allocation.routingNodes().initialize(sr, "node1", null, -1); } else { - allocation.routingNodes().initialize(sr, "node0", -1); + allocation.routingNodes().initialize(sr, "node0", null, -1); } break; case 1: if (sr.primary()) { - allocation.routingNodes().initialize(sr, "node1", -1); + allocation.routingNodes().initialize(sr, "node1", null, -1); } else { - allocation.routingNodes().initialize(sr, "node2", -1); + 
allocation.routingNodes().initialize(sr, "node2", null, -1); } break; case 2: if (sr.primary()) { - allocation.routingNodes().initialize(sr, "node3", -1); + allocation.routingNodes().initialize(sr, "node3", null, -1); } else { - allocation.routingNodes().initialize(sr, "node2", -1); + allocation.routingNodes().initialize(sr, "node2", null, -1); } break; case 3: if (sr.primary()) { - allocation.routingNodes().initialize(sr, "node3", -1); + allocation.routingNodes().initialize(sr, "node3", null, -1); } else { - allocation.routingNodes().initialize(sr, "node1", -1); + allocation.routingNodes().initialize(sr, "node1", null, -1); } break; case 4: if (sr.primary()) { - allocation.routingNodes().initialize(sr, "node2", -1); + allocation.routingNodes().initialize(sr, "node2", null, -1); } else { - allocation.routingNodes().initialize(sr, "node0", -1); + allocation.routingNodes().initialize(sr, "node0", null, -1); } break; } diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index e2830b1e226..d1cd8d974c6 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -161,7 +161,8 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { */ public void testFoundAllocationAndAllocating() { final RoutingAllocation allocation; - if (randomBoolean()) { + boolean useAllocationIds = randomBoolean(); + if (useAllocationIds) { allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); testAllocator.addData(node1, 1, "allocId1", randomBoolean()); } else { @@ -173,6 +174,10 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.id())); + if (useAllocationIds) { + // check that allocation id is reused + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo("allocId1")); + } } /** From efb4582eb15c17f4546f4f5bb2947bbad4059c67 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Tue, 9 Feb 2016 12:34:28 +0100 Subject: [PATCH 09/22] Move reminder settings in NettyHttpServerTransport to the new infra Some bw incompatible setting changes: http.netty.http.blocking_server -> http.tcp.blocking_server http.netty.host (removed, we just have http.host) http.netty.bind_host (removed, we just have http.bind_host) http.netty.publish_host (removed, we just have http.publish_host) http.netty.tcp_no_delay -> http.tcp.no_delay http.netty.tcp_keep_alive -> http.tcp.keep_alive http.netty.reuse_address -> http.txp.reuse_address http.netty.tcp_send_buffer_size -> http.tcp.send_buffer_size http.netty.tcp_receive_buffer_size -> http.tcp.receive_buffer_size Closes #16531 --- .../common/network/NetworkService.java | 2 +- .../common/settings/ClusterSettings.java | 491 +++++++++--------- .../http/HttpTransportSettings.java | 9 + .../http/netty/NettyHttpServerTransport.java | 99 ++-- 4 files changed, 329 insertions(+), 272 deletions(-) diff --git 
a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java index 1debc6960af..5e8dbc4dcad 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -104,7 +104,7 @@ public class NetworkService extends AbstractComponent { */ public InetAddress[] resolveBindHostAddresses(String bindHosts[]) throws IOException { // first check settings - if (bindHosts == null) { + if (bindHosts == null || bindHosts.length == 0) { if (GLOBAL_NETWORK_BINDHOST_SETTING.exists(settings) || GLOBAL_NETWORK_HOST_SETTING.exists(settings)) { // if we have settings use them (we have a fallback to GLOBAL_NETWORK_HOST_SETTING inline bindHosts = GLOBAL_NETWORK_BINDHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY); diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 033423b08ef..142fbacfb01 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.bootstrap.BootstrapSettings; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClientNodesService; @@ -58,6 +59,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.http.netty.NettyHttpServerTransport; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.indices.IndicesService; @@ -88,7 +90,6 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.transport.netty.NettyTransport; import org.elasticsearch.tribe.TribeService; -import org.elasticsearch.bootstrap.BootstrapSettings; import java.util.Arrays; import java.util.Collections; @@ -141,7 +142,8 @@ public final class ClusterSettings extends AbstractScopedSettings { String component = key.substring("logger.".length()); if ("_root".equals(component)) { final String rootLevel = value.get(key); - ESLoggerFactory.getRootLogger().setLevel(rootLevel == null ? ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings).name() : rootLevel); + ESLoggerFactory.getRootLogger().setLevel(rootLevel == null ? 
ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings) + .name() : rootLevel); } else { ESLoggerFactory.getLogger(component).setLevel(value.get(key)); } @@ -150,240 +152,253 @@ public final class ClusterSettings extends AbstractScopedSettings { } public static Set> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>( - Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, - TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client - TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT, - TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME, - TransportClientNodesService.CLIENT_TRANSPORT_SNIFF, - AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, - BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, - BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, - BalancedShardsAllocator.THRESHOLD_SETTING, - ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, - ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, - EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, - EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, - FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, - FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, - FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, - FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING, - FsRepository.REPOSITORIES_COMPRESS_SETTING, - FsRepository.REPOSITORIES_LOCATION_SETTING, - IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, - IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, - IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING, - IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING, - IndicesTTLService.INDICES_TTL_INTERVAL_SETTING, - MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, - MetaData.SETTING_READ_ONLY_SETTING, - RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, - RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, - RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, - RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, - RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, - RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, - ThreadPool.THREADPOOL_GROUP_SETTING, - ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, - ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, - ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, - ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, - InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, - InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, - 
SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, - DestructiveOperations.REQUIRES_NAME_SETTING, - DiscoverySettings.PUBLISH_TIMEOUT_SETTING, - DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING, - DiscoverySettings.COMMIT_TIMEOUT_SETTING, - DiscoverySettings.NO_MASTER_BLOCK_SETTING, - GatewayService.EXPECTED_DATA_NODES_SETTING, - GatewayService.EXPECTED_MASTER_NODES_SETTING, - GatewayService.EXPECTED_NODES_SETTING, - GatewayService.RECOVER_AFTER_DATA_NODES_SETTING, - GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING, - GatewayService.RECOVER_AFTER_NODES_SETTING, - GatewayService.RECOVER_AFTER_TIME_SETTING, - NetworkModule.HTTP_ENABLED, - NetworkModule.HTTP_TYPE_SETTING, - NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING, - NetworkModule.TRANSPORT_TYPE_SETTING, - HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS, - HttpTransportSettings.SETTING_CORS_ENABLED, - HttpTransportSettings.SETTING_CORS_MAX_AGE, - HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED, - HttpTransportSettings.SETTING_PIPELINING, - HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN, - HttpTransportSettings.SETTING_HTTP_PORT, - HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT, - HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS, - HttpTransportSettings.SETTING_HTTP_COMPRESSION, - HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL, - HttpTransportSettings.SETTING_CORS_ALLOW_METHODS, - HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS, - HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED, - HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH, - HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE, - HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE, - HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH, - HttpTransportSettings.SETTING_HTTP_RESET_COOKIES, - HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, - HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, - InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, - SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, - ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, - TransportService.TRACE_LOG_EXCLUDE_SETTING, - TransportService.TRACE_LOG_INCLUDE_SETTING, - TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, - ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, - InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, - HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, - HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, - Transport.TRANSPORT_TCP_COMPRESS, - TransportSettings.TRANSPORT_PROFILES_SETTING, - TransportSettings.HOST, - TransportSettings.PUBLISH_HOST, - TransportSettings.BIND_HOST, - TransportSettings.PUBLISH_PORT, - TransportSettings.PORT, - NettyTransport.WORKER_COUNT, - NettyTransport.CONNECTIONS_PER_NODE_RECOVERY, - NettyTransport.CONNECTIONS_PER_NODE_BULK, - NettyTransport.CONNECTIONS_PER_NODE_REG, - NettyTransport.CONNECTIONS_PER_NODE_STATE, - NettyTransport.CONNECTIONS_PER_NODE_PING, - NettyTransport.PING_SCHEDULE, - NettyTransport.TCP_BLOCKING_CLIENT, - NettyTransport.TCP_CONNECT_TIMEOUT, - NettyTransport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY, - NettyTransport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, - 
NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE, - NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN, - NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX, - NetworkService.NETWORK_SERVER, - NettyTransport.NETTY_BOSS_COUNT, - NettyTransport.TCP_NO_DELAY, - NettyTransport.TCP_KEEP_ALIVE, - NettyTransport.TCP_REUSE_ADDRESS, - NettyTransport.TCP_SEND_BUFFER_SIZE, - NettyTransport.TCP_RECEIVE_BUFFER_SIZE, - NettyTransport.TCP_BLOCKING_SERVER, - NetworkService.GLOBAL_NETWORK_HOST_SETTING, - NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING, - NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING, - NetworkService.TcpSettings.TCP_NO_DELAY, - NetworkService.TcpSettings.TCP_KEEP_ALIVE, - NetworkService.TcpSettings.TCP_REUSE_ADDRESS, - NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, - NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, - NetworkService.TcpSettings.TCP_BLOCKING, - NetworkService.TcpSettings.TCP_BLOCKING_SERVER, - NetworkService.TcpSettings.TCP_BLOCKING_CLIENT, - NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT, - IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, - IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, - PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING, - ScriptService.SCRIPT_CACHE_SIZE_SETTING, - ScriptService.SCRIPT_CACHE_EXPIRE_SETTING, - ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING, - IndicesService.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING, - IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY, - IndicesRequestCache.INDICES_CACHE_QUERY_SIZE, - IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE, - IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL, - HunspellService.HUNSPELL_LAZY_LOAD, - HunspellService.HUNSPELL_IGNORE_CASE, - HunspellService.HUNSPELL_DICTIONARY_OPTIONS, - IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, - Environment.PATH_CONF_SETTING, - Environment.PATH_DATA_SETTING, - Environment.PATH_HOME_SETTING, - Environment.PATH_LOGS_SETTING, - Environment.PATH_PLUGINS_SETTING, - Environment.PATH_REPO_SETTING, - Environment.PATH_SCRIPTS_SETTING, - Environment.PATH_SHARED_DATA_SETTING, - Environment.PIDFILE_SETTING, - DiscoveryService.DISCOVERY_SEED_SETTING, - DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING, - DiscoveryModule.DISCOVERY_TYPE_SETTING, - DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING, - FaultDetection.PING_RETRIES_SETTING, - FaultDetection.PING_TIMEOUT_SETTING, - FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING, - FaultDetection.PING_INTERVAL_SETTING, - FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING, - ZenDiscovery.PING_TIMEOUT_SETTING, - ZenDiscovery.JOIN_TIMEOUT_SETTING, - ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING, - ZenDiscovery.JOIN_RETRY_DELAY_SETTING, - ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING, - ZenDiscovery.SEND_LEAVE_REQUEST_SETTING, - ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING, - ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING, - ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING, - UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING, - UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING, - SearchService.DEFAULT_KEEPALIVE_SETTING, - SearchService.KEEPALIVE_INTERVAL_SETTING, - Node.WRITE_PORTS_FIELD_SETTING, - Node.NODE_NAME_SETTING, - Node.NODE_CLIENT_SETTING, - Node.NODE_DATA_SETTING, - Node.NODE_MASTER_SETTING, - Node.NODE_LOCAL_SETTING, - Node.NODE_MODE_SETTING, - Node.NODE_INGEST_SETTING, - Node.NODE_ATTRIBUTES, - URLRepository.ALLOWED_URLS_SETTING, - URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING, - URLRepository.REPOSITORIES_URL_SETTING, - URLRepository.SUPPORTED_PROTOCOLS_SETTING, - 
TransportMasterNodeReadAction.FORCE_LOCAL_SETTING, - AutoCreateIndex.AUTO_CREATE_INDEX_SETTING, - BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX, - ClusterName.CLUSTER_NAME_SETTING, - Client.CLIENT_TYPE_SETTING_S, - InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, - ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING, - EsExecutors.PROCESSORS_SETTING, - ThreadContext.DEFAULT_HEADERS_SETTING, - ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING, - ESLoggerFactory.LOG_LEVEL_SETTING, - TribeService.BLOCKS_METADATA_SETTING, - TribeService.BLOCKS_WRITE_SETTING, - TribeService.BLOCKS_WRITE_INDICES_SETTING, - TribeService.BLOCKS_READ_INDICES_SETTING, - TribeService.BLOCKS_METADATA_INDICES_SETTING, - TribeService.ON_CONFLICT_SETTING, - TribeService.TRIBE_NAME_SETTING, - NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING, - NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING, - NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH, - OsService.REFRESH_INTERVAL_SETTING, - ProcessService.REFRESH_INTERVAL_SETTING, - JvmService.REFRESH_INTERVAL_SETTING, - FsService.REFRESH_INTERVAL_SETTING, - JvmGcMonitorService.ENABLED_SETTING, - JvmGcMonitorService.REFRESH_INTERVAL_SETTING, - JvmGcMonitorService.GC_SETTING, - PageCacheRecycler.LIMIT_HEAP_SETTING, - PageCacheRecycler.WEIGHT_BYTES_SETTING, - PageCacheRecycler.WEIGHT_INT_SETTING, - PageCacheRecycler.WEIGHT_LONG_SETTING, - PageCacheRecycler.WEIGHT_OBJECTS_SETTING, - PageCacheRecycler.TYPE_SETTING, - PluginsService.MANDATORY_SETTING, - BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING, - BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING, - BootstrapSettings.MLOCKALL_SETTING, - BootstrapSettings.SECCOMP_SETTING, - BootstrapSettings.CTRLHANDLER_SETTING - ))); + Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, + TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind + // of odd here and should only be valid if we are a transport client + TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT, + TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME, + TransportClientNodesService.CLIENT_TRANSPORT_SNIFF, + AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, + BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.THRESHOLD_SETTING, + ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, + ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, + FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING, + FsRepository.REPOSITORIES_COMPRESS_SETTING, + FsRepository.REPOSITORIES_LOCATION_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, + IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING, + IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING, + IndicesTTLService.INDICES_TTL_INTERVAL_SETTING, + MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, + MetaData.SETTING_READ_ONLY_SETTING, + RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, + 
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, + RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, + ThreadPool.THREADPOOL_GROUP_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, + SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, + DestructiveOperations.REQUIRES_NAME_SETTING, + DiscoverySettings.PUBLISH_TIMEOUT_SETTING, + DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING, + DiscoverySettings.COMMIT_TIMEOUT_SETTING, + DiscoverySettings.NO_MASTER_BLOCK_SETTING, + GatewayService.EXPECTED_DATA_NODES_SETTING, + GatewayService.EXPECTED_MASTER_NODES_SETTING, + GatewayService.EXPECTED_NODES_SETTING, + GatewayService.RECOVER_AFTER_DATA_NODES_SETTING, + GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING, + GatewayService.RECOVER_AFTER_NODES_SETTING, + GatewayService.RECOVER_AFTER_TIME_SETTING, + NetworkModule.HTTP_ENABLED, + NetworkModule.HTTP_TYPE_SETTING, + NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING, + NetworkModule.TRANSPORT_TYPE_SETTING, + HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS, + HttpTransportSettings.SETTING_CORS_ENABLED, + HttpTransportSettings.SETTING_CORS_MAX_AGE, + HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED, + HttpTransportSettings.SETTING_PIPELINING, + HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN, + HttpTransportSettings.SETTING_HTTP_PORT, + HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT, + HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS, + HttpTransportSettings.SETTING_HTTP_COMPRESSION, + HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL, + HttpTransportSettings.SETTING_CORS_ALLOW_METHODS, + HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS, + HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED, + HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH, + HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE, + HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE, + HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH, + HttpTransportSettings.SETTING_HTTP_RESET_COOKIES, + NettyHttpServerTransport.SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY, + NettyHttpServerTransport.SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, + NettyHttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, + NettyHttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN, + NettyHttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX, + NettyHttpServerTransport.SETTING_HTTP_WORKER_COUNT, + 
NettyHttpServerTransport.SETTING_HTTP_TCP_NO_DELAY, + NettyHttpServerTransport.SETTING_HTTP_TCP_KEEP_ALIVE, + NettyHttpServerTransport.SETTING_HTTP_TCP_BLOCKING_SERVER, + NettyHttpServerTransport.SETTING_HTTP_TCP_REUSE_ADDRESS, + NettyHttpServerTransport.SETTING_HTTP_TCP_SEND_BUFFER_SIZE, + NettyHttpServerTransport.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE, + HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, + InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, + TransportService.TRACE_LOG_EXCLUDE_SETTING, + TransportService.TRACE_LOG_INCLUDE_SETTING, + TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, + ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, + InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, + Transport.TRANSPORT_TCP_COMPRESS, + TransportSettings.TRANSPORT_PROFILES_SETTING, + TransportSettings.HOST, + TransportSettings.PUBLISH_HOST, + TransportSettings.BIND_HOST, + TransportSettings.PUBLISH_PORT, + TransportSettings.PORT, + NettyTransport.WORKER_COUNT, + NettyTransport.CONNECTIONS_PER_NODE_RECOVERY, + NettyTransport.CONNECTIONS_PER_NODE_BULK, + NettyTransport.CONNECTIONS_PER_NODE_REG, + NettyTransport.CONNECTIONS_PER_NODE_STATE, + NettyTransport.CONNECTIONS_PER_NODE_PING, + NettyTransport.PING_SCHEDULE, + NettyTransport.TCP_BLOCKING_CLIENT, + NettyTransport.TCP_CONNECT_TIMEOUT, + NettyTransport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY, + NettyTransport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, + NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE, + NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN, + NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX, + NetworkService.NETWORK_SERVER, + NettyTransport.NETTY_BOSS_COUNT, + NettyTransport.TCP_NO_DELAY, + NettyTransport.TCP_KEEP_ALIVE, + NettyTransport.TCP_REUSE_ADDRESS, + NettyTransport.TCP_SEND_BUFFER_SIZE, + NettyTransport.TCP_RECEIVE_BUFFER_SIZE, + NettyTransport.TCP_BLOCKING_SERVER, + NetworkService.GLOBAL_NETWORK_HOST_SETTING, + NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING, + NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING, + NetworkService.TcpSettings.TCP_NO_DELAY, + NetworkService.TcpSettings.TCP_KEEP_ALIVE, + NetworkService.TcpSettings.TCP_REUSE_ADDRESS, + NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, + NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, + NetworkService.TcpSettings.TCP_BLOCKING, + NetworkService.TcpSettings.TCP_BLOCKING_SERVER, + NetworkService.TcpSettings.TCP_BLOCKING_CLIENT, + NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT, + IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, + IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, + PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING, + ScriptService.SCRIPT_CACHE_SIZE_SETTING, + ScriptService.SCRIPT_CACHE_EXPIRE_SETTING, + ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING, + IndicesService.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING, + IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY, + IndicesRequestCache.INDICES_CACHE_QUERY_SIZE, + 
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE, + IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL, + HunspellService.HUNSPELL_LAZY_LOAD, + HunspellService.HUNSPELL_IGNORE_CASE, + HunspellService.HUNSPELL_DICTIONARY_OPTIONS, + IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, + Environment.PATH_CONF_SETTING, + Environment.PATH_DATA_SETTING, + Environment.PATH_HOME_SETTING, + Environment.PATH_LOGS_SETTING, + Environment.PATH_PLUGINS_SETTING, + Environment.PATH_REPO_SETTING, + Environment.PATH_SCRIPTS_SETTING, + Environment.PATH_SHARED_DATA_SETTING, + Environment.PIDFILE_SETTING, + DiscoveryService.DISCOVERY_SEED_SETTING, + DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING, + DiscoveryModule.DISCOVERY_TYPE_SETTING, + DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING, + FaultDetection.PING_RETRIES_SETTING, + FaultDetection.PING_TIMEOUT_SETTING, + FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING, + FaultDetection.PING_INTERVAL_SETTING, + FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING, + ZenDiscovery.PING_TIMEOUT_SETTING, + ZenDiscovery.JOIN_TIMEOUT_SETTING, + ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING, + ZenDiscovery.JOIN_RETRY_DELAY_SETTING, + ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING, + ZenDiscovery.SEND_LEAVE_REQUEST_SETTING, + ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING, + ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING, + ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING, + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING, + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING, + SearchService.DEFAULT_KEEPALIVE_SETTING, + SearchService.KEEPALIVE_INTERVAL_SETTING, + Node.WRITE_PORTS_FIELD_SETTING, + Node.NODE_NAME_SETTING, + Node.NODE_CLIENT_SETTING, + Node.NODE_DATA_SETTING, + Node.NODE_MASTER_SETTING, + Node.NODE_LOCAL_SETTING, + Node.NODE_MODE_SETTING, + Node.NODE_INGEST_SETTING, + Node.NODE_ATTRIBUTES, + URLRepository.ALLOWED_URLS_SETTING, + URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING, + URLRepository.REPOSITORIES_URL_SETTING, + URLRepository.SUPPORTED_PROTOCOLS_SETTING, + TransportMasterNodeReadAction.FORCE_LOCAL_SETTING, + AutoCreateIndex.AUTO_CREATE_INDEX_SETTING, + BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX, + ClusterName.CLUSTER_NAME_SETTING, + Client.CLIENT_TYPE_SETTING_S, + InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, + ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING, + EsExecutors.PROCESSORS_SETTING, + ThreadContext.DEFAULT_HEADERS_SETTING, + ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING, + ESLoggerFactory.LOG_LEVEL_SETTING, + TribeService.BLOCKS_METADATA_SETTING, + TribeService.BLOCKS_WRITE_SETTING, + TribeService.BLOCKS_WRITE_INDICES_SETTING, + TribeService.BLOCKS_READ_INDICES_SETTING, + TribeService.BLOCKS_METADATA_INDICES_SETTING, + TribeService.ON_CONFLICT_SETTING, + TribeService.TRIBE_NAME_SETTING, + NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING, + NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING, + NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH, + OsService.REFRESH_INTERVAL_SETTING, + ProcessService.REFRESH_INTERVAL_SETTING, + JvmService.REFRESH_INTERVAL_SETTING, + FsService.REFRESH_INTERVAL_SETTING, + JvmGcMonitorService.ENABLED_SETTING, + JvmGcMonitorService.REFRESH_INTERVAL_SETTING, + JvmGcMonitorService.GC_SETTING, + PageCacheRecycler.LIMIT_HEAP_SETTING, + PageCacheRecycler.WEIGHT_BYTES_SETTING, + PageCacheRecycler.WEIGHT_INT_SETTING, + PageCacheRecycler.WEIGHT_LONG_SETTING, + PageCacheRecycler.WEIGHT_OBJECTS_SETTING, + PageCacheRecycler.TYPE_SETTING, + PluginsService.MANDATORY_SETTING, 
+ BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING, + BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING, + BootstrapSettings.MLOCKALL_SETTING, + BootstrapSettings.SECCOMP_SETTING, + BootstrapSettings.CTRLHANDLER_SETTING + ))); } diff --git a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index c5a1844f7ff..0e362615f0c 100644 --- a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -25,6 +25,11 @@ import org.elasticsearch.common.transport.PortsRange; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.common.settings.Setting.listSetting; + public final class HttpTransportSettings { public static final Setting SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER); @@ -37,6 +42,10 @@ public final class HttpTransportSettings { public static final Setting SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, false, Scope.CLUSTER); public static final Setting SETTING_HTTP_COMPRESSION = Setting.boolSetting("http.compression", false, false, Scope.CLUSTER); public static final Setting SETTING_HTTP_COMPRESSION_LEVEL = Setting.intSetting("http.compression_level", 6, false, Scope.CLUSTER); + public static final Setting> SETTING_HTTP_HOST = listSetting("http.host", emptyList(), s -> s, false, Scope.CLUSTER); + public static final Setting> SETTING_HTTP_PUBLISH_HOST = listSetting("http.publish_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER); + public static final Setting> SETTING_HTTP_BIND_HOST = listSetting("http.bind_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER); + public static final Setting SETTING_HTTP_PORT = new Setting("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER); public static final Setting SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", 0, 0, false, Scope.CLUSTER); public static final Setting SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER); diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java index 280ef711dc9..79927c27632 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java @@ -20,13 +20,13 @@ package org.elasticsearch.http.netty; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.netty.NettyUtils; import org.elasticsearch.common.netty.OpenChannelsHandler; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -50,8 +50,8 @@ import org.elasticsearch.http.netty.cors.CorsConfigBuilder; import 
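For orientation, the settings collected in the registration list above all follow the same declaration pattern. A minimal sketch, using made-up example.* keys that are not part of this patch, of how such settings are declared with the Setting infrastructure and then read back from a Settings instance:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Scope;
    import org.elasticsearch.common.settings.Settings;

    import java.util.List;

    import static java.util.Collections.emptyList;
    import static org.elasticsearch.common.settings.Setting.listSetting;

    public final class ExampleSettings {

        // plain boolean setting: default false, not dynamically updatable, cluster scope
        public static final Setting<Boolean> EXAMPLE_ENABLED =
                Setting.boolSetting("example.enabled", false, false, Scope.CLUSTER);

        // list setting with a literal default, and a second list setting that falls back to it
        public static final Setting<List<String>> EXAMPLE_HOST =
                listSetting("example.host", emptyList(), s -> s, false, Scope.CLUSTER);
        public static final Setting<List<String>> EXAMPLE_BIND_HOST =
                listSetting("example.bind_host", EXAMPLE_HOST, s -> s, false, Scope.CLUSTER);

        static void read(Settings settings) {
            boolean enabled = EXAMPLE_ENABLED.get(settings);           // typed access, default applied if unset
            List<String> bindHosts = EXAMPLE_BIND_HOST.get(settings);  // resolves example.bind_host, then example.host
        }
    }

Declaring a setting this way and registering it in ClusterSettings (as the list above does) is what makes it known, validated, and typed; raw settings.get(String) access is no longer needed.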
org.elasticsearch.http.netty.cors.CorsHandler; import org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler; import org.elasticsearch.monitor.jvm.JvmInfo; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.rest.support.RestUtils; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BindTransportException; import org.jboss.netty.bootstrap.ServerBootstrap; import org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory; @@ -81,19 +81,16 @@ import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; +import static org.elasticsearch.common.settings.Setting.boolSetting; +import static org.elasticsearch.common.settings.Setting.byteSizeSetting; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; -import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING; -import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE; -import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY; -import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE; -import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_REUSE_ADDRESS; -import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_BIND_HOST; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; @@ -102,6 +99,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONT import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PORT; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING; @@ -117,6 +115,52 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY = + Setting.byteSizeSetting("http.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); + public static Setting SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = + Setting.intSetting("http.netty.max_composite_buffer_components", -1, false, Setting.Scope.CLUSTER); + + public static final Setting SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count", + 
(s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), + (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), false, Setting.Scope.CLUSTER); + + public static final Setting SETTING_HTTP_TCP_NO_DELAY = boolSetting("http.tcp_no_delay", NetworkService.TcpSettings + .TCP_NO_DELAY, false, + Setting.Scope.CLUSTER); + public static final Setting SETTING_HTTP_TCP_KEEP_ALIVE = boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings + .TCP_KEEP_ALIVE, false, + Setting.Scope.CLUSTER); + public static final Setting SETTING_HTTP_TCP_BLOCKING_SERVER = boolSetting("http.tcp.blocking_server", NetworkService + .TcpSettings.TCP_BLOCKING_SERVER, + false, Setting.Scope.CLUSTER); + public static final Setting SETTING_HTTP_TCP_REUSE_ADDRESS = boolSetting("http.tcp.reuse_address", NetworkService + .TcpSettings.TCP_REUSE_ADDRESS, + false, Setting.Scope.CLUSTER); + + public static final Setting SETTING_HTTP_TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp.send_buffer_size", + NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER); + public static final Setting SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp" + + ".receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER); + public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( + "transport.netty.receive_predictor_size", + settings -> { + long defaultReceiverPredictor = 512 * 1024; + if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) { + // we can guess a better default... + long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / SETTING_HTTP_WORKER_COUNT.get + (settings)); + defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024)); + } + return new ByteSizeValue(defaultReceiverPredictor).toString(); + }, false, Setting.Scope.CLUSTER); + public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("http.netty" + + ".receive_predictor_min", + SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); + public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting("http.netty" + + ".receive_predictor_max", + SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); + + protected final NetworkService networkService; protected final BigArrays bigArrays; @@ -175,47 +219,36 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent 0) { - // we can guess a better default... 
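The worker-count and receive-predictor definitions above illustrate two recurring patterns in this patch: a default computed from other settings at resolution time, and a setting that falls back to another setting unless it is set explicitly. A condensed sketch of both, again with made-up example.* keys rather than settings from this change:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.unit.ByteSizeValue;
    import org.elasticsearch.common.util.concurrent.EsExecutors;

    public final class ExampleDerivedSettings {

        // default derived from the node's processor count when the setting is resolved
        public static final Setting<Integer> EXAMPLE_WORKER_COUNT = new Setting<>(
                "example.worker_count",
                (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2),
                (s) -> Setting.parseInt(s, 1, "example.worker_count"),
                false, Setting.Scope.CLUSTER);

        // byte-size setting with a fixed default ...
        public static final Setting<ByteSizeValue> EXAMPLE_BUFFER_SIZE =
                Setting.byteSizeSetting("example.buffer_size", new ByteSizeValue(512 * 1024), false, Setting.Scope.CLUSTER);
        // ... and min/max variants that fall back to it unless they are set explicitly
        public static final Setting<ByteSizeValue> EXAMPLE_BUFFER_MIN =
                Setting.byteSizeSetting("example.buffer_min", EXAMPLE_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
        public static final Setting<ByteSizeValue> EXAMPLE_BUFFER_MAX =
                Setting.byteSizeSetting("example.buffer_max", EXAMPLE_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
    }

Moving the old hand-rolled settings.getAsBytesSize(...) fallback chains into fallback settings like these is exactly what lets the constructor below shrink to simple SETTING_*.get(settings) calls.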
- long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / workerCount); - defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024)); - } // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one - ByteSizeValue receivePredictorMin = settings.getAsBytesSize("http.netty.receive_predictor_min", settings.getAsBytesSize("http.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor))); - ByteSizeValue receivePredictorMax = settings.getAsBytesSize("http.netty.receive_predictor_max", settings.getAsBytesSize("http.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor))); + ByteSizeValue receivePredictorMin = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN.get(settings); + ByteSizeValue receivePredictorMax = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX.get(settings); if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) { receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes()); } else { @@ -479,7 +512,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent= 0) { if (transport.maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) { requestDecoder.setMaxCumulationBufferCapacity(Integer.MAX_VALUE); } else { From cb50e73f7c7c78927df9365cad768c28053b14cd Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 11 Feb 2016 15:13:25 +0100 Subject: [PATCH 10/22] Add missing try with resources in InstallPluginCommandTest, this should fix the build on windows. --- .../org/elasticsearch/plugins/InstallPluginCommandTests.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index c512cc5a7b8..0c37d7bb0ee 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.plugins; import java.io.IOException; +import java.io.InputStream; import java.net.MalformedURLException; import java.net.URL; import java.nio.charset.StandardCharsets; @@ -203,7 +204,9 @@ public class InstallPluginCommandTests extends ESTestCase { Path pluginDir = createTempDir(); String pluginZip = createPlugin("fake", pluginDir); Path pluginZipWithSpaces = createTempFile("foo bar", ".zip"); - Files.copy(new URL(pluginZip).openStream(), pluginZipWithSpaces, StandardCopyOption.REPLACE_EXISTING); + try (InputStream in = new URL(pluginZip).openStream()) { + Files.copy(in, pluginZipWithSpaces, StandardCopyOption.REPLACE_EXISTING); + } installPlugin(pluginZipWithSpaces.toUri().toURL().toString(), env); assertPlugin("fake", pluginDir, env); } From d538dd64c28c39a7eace380b50e999d116984152 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 11 Feb 2016 16:30:34 +0100 Subject: [PATCH 11/22] Refactor IndicesRequestCache to make it testable. This commit moves IndicesRequestCache into o.e.indics and makes all API in this class package private. All references to SearchReqeust, SearchContext etc. have been factored out and relevant glue code has been added to IndicesService. The IndicesRequestCache is not a simple class without any hard dependencies on ThreadPool nor SearchService or IndexShard. This now allows to add unittests. 
This commit also removes two settings `indices.requests.cache.clean_interval` and `indices.fielddata.cache.clean_interval` in favor of `indices.cache.clean_interval` which cleans both caches. --- .../resources/checkstyle_suppressions.xml | 3 - .../TransportClearIndicesCacheAction.java | 5 +- .../admin/indices/stats/CommonStats.java | 5 +- .../common/settings/ClusterSettings.java | 7 +- .../common/settings/IndexScopedSettings.java | 2 +- .../org/elasticsearch/index/IndexModule.java | 2 +- .../cache/query/index/IndexQueryCache.java | 2 +- .../cache/request/ShardRequestCache.java | 24 +- .../index/query/QueryBuilders.java | 2 +- .../index/query/TermsQueryBuilder.java | 3 +- .../index/query/TermsQueryParser.java | 2 +- .../elasticsearch/index/shard/IndexShard.java | 2 +- .../{cache/query => }/IndicesQueryCache.java | 2 +- .../indices/IndicesRequestCache.java | 337 +++++++++++++ .../elasticsearch/indices/IndicesService.java | 189 +++++++- .../{cache/query/terms => }/TermsLookup.java | 9 +- .../cache/request/IndicesRequestCache.java | 443 ------------------ .../elasticsearch/search/SearchService.java | 9 +- .../elasticsearch/index/IndexModuleTests.java | 3 +- .../index/query/TermsQueryBuilderTests.java | 2 +- .../query => }/IndicesQueryCacheTests.java | 4 +- .../query => }/IndicesRequestCacheIT.java | 13 +- .../indices/IndicesRequestCacheTests.java | 366 +++++++++++++++ .../query/terms => }/TermsLookupTests.java | 3 +- .../indices/stats/IndexStatsIT.java | 5 +- .../search/query/SearchQueryIT.java | 6 +- .../ContextAndHeaderTransportIT.java | 2 +- docs/reference/migration/migrate_3_0.asciidoc | 6 + .../elasticsearch/test/ESIntegTestCase.java | 2 +- 29 files changed, 934 insertions(+), 526 deletions(-) rename core/src/main/java/org/elasticsearch/indices/{cache/query => }/IndicesQueryCache.java (99%) create mode 100644 core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java rename core/src/main/java/org/elasticsearch/indices/{cache/query/terms => }/TermsLookup.java (95%) delete mode 100644 core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java rename core/src/test/java/org/elasticsearch/indices/{cache/query => }/IndicesQueryCacheTests.java (99%) rename core/src/test/java/org/elasticsearch/indices/{cache/query => }/IndicesRequestCacheIT.java (90%) create mode 100644 core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java rename core/src/test/java/org/elasticsearch/indices/{cache/query/terms => }/TermsLookupTests.java (97%) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index b824ad94998..ca026e92d80 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -649,8 +649,6 @@ - - @@ -1309,7 +1307,6 @@ - diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index 16002d36448..bc229d72b1b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import 
org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -98,7 +97,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc } if (request.requestCache()) { clearedAtLeastOne = true; - indicesService.getIndicesRequestCache().clear(shard); + indicesService.clearRequestCache(shard); } if (request.recycler()) { logger.debug("Clear CacheRecycler on index [{}]", service.index()); @@ -114,7 +113,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc } else { service.cache().clear("api"); service.fieldData().clear(); - indicesService.getIndicesRequestCache().clear(shard); + indicesService.clearRequestCache(shard); } } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 47fb8d8356a..c1c29780a47 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.cache.request.RequestCacheStats; import org.elasticsearch.index.engine.SegmentsStats; @@ -41,13 +40,11 @@ import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.index.suggest.stats.SuggestStats; import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.index.warmer.WarmerStats; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.search.suggest.completion.CompletionStats; import java.io.IOException; diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 142fbacfb01..84542ae73ff 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -65,8 +65,8 @@ import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.HunspellService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; +import org.elasticsearch.indices.IndicesQueryCache; +import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.store.IndicesStore; @@ -307,11 +307,10 @@ public final class ClusterSettings extends 
AbstractScopedSettings { ScriptService.SCRIPT_CACHE_SIZE_SETTING, ScriptService.SCRIPT_CACHE_EXPIRE_SETTING, ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING, - IndicesService.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING, + IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING, IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY, IndicesRequestCache.INDICES_CACHE_QUERY_SIZE, IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE, - IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL, HunspellService.HUNSPELL_LAZY_LOAD, HunspellService.HUNSPELL_IGNORE_CASE, HunspellService.HUNSPELL_DICTIONARY_OPTIONS, diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 157bbfbd5b9..69ef795812d 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -39,7 +39,7 @@ import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.IndexWarmer; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; +import org.elasticsearch.indices.IndicesRequestCache; import java.util.Arrays; import java.util.Collections; diff --git a/core/src/main/java/org/elasticsearch/index/IndexModule.java b/core/src/main/java/org/elasticsearch/index/IndexModule.java index f23441fa908..f9eb3ec2b54 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/core/src/main/java/org/elasticsearch/index/IndexModule.java @@ -37,7 +37,7 @@ import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.IndexStoreConfig; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; diff --git a/core/src/main/java/org/elasticsearch/index/cache/query/index/IndexQueryCache.java b/core/src/main/java/org/elasticsearch/index/cache/query/index/IndexQueryCache.java index 352d6af5ee9..96b2ede2a99 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/query/index/IndexQueryCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/query/index/IndexQueryCache.java @@ -25,7 +25,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.cache.query.QueryCache; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.IndicesQueryCache; /** * The index-level query cache. 
This class mostly delegates to the node-level diff --git a/core/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java b/core/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java index 5e9c8156046..ef81b288f92 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java @@ -19,27 +19,24 @@ package org.elasticsearch.index.cache.request; +import org.apache.lucene.util.Accountable; import org.elasticsearch.common.cache.RemovalListener; import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; +import org.elasticsearch.indices.IndicesRequestCache; /** */ -public class ShardRequestCache extends AbstractIndexShardComponent implements RemovalListener { +public final class ShardRequestCache { final CounterMetric evictionsMetric = new CounterMetric(); final CounterMetric totalMetric = new CounterMetric(); final CounterMetric hitCount = new CounterMetric(); final CounterMetric missCount = new CounterMetric(); - public ShardRequestCache(ShardId shardId, IndexSettings indexSettings) { - super(shardId, indexSettings); - } - public RequestCacheStats stats() { return new RequestCacheStats(totalMetric.count(), evictionsMetric.count(), hitCount.count(), missCount.count()); } @@ -52,21 +49,20 @@ public class ShardRequestCache extends AbstractIndexShardComponent implements Re missCount.inc(); } - public void onCached(IndicesRequestCache.Key key, IndicesRequestCache.Value value) { + public void onCached(Accountable key, Accountable value) { totalMetric.inc(key.ramBytesUsed() + value.ramBytesUsed()); } - @Override - public void onRemoval(RemovalNotification removalNotification) { - if (removalNotification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED) { + public void onRemoval(Accountable key, Accountable value, boolean evicted) { + if (evicted) { evictionsMetric.inc(); } long dec = 0; - if (removalNotification.getKey() != null) { - dec += removalNotification.getKey().ramBytesUsed(); + if (key != null) { + dec += key.ramBytesUsed(); } - if (removalNotification.getValue() != null) { - dec += removalNotification.getValue().ramBytesUsed(); + if (value != null) { + dec += value.ramBytesUsed(); } totalMetric.dec(dec); } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 03ccebf0479..21c1f3ff695 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -27,7 +27,7 @@ import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; import org.elasticsearch.index.search.MatchQuery; -import org.elasticsearch.indices.cache.query.terms.TermsLookup; +import org.elasticsearch.indices.TermsLookup; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.Template; diff --git a/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java 
b/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java index 326a6ed8b8e..67e5b5643a5 100644 --- a/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java @@ -37,8 +37,7 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.indices.cache.query.terms.TermsLookup; -import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.indices.TermsLookup; import java.io.IOException; import java.util.ArrayList; diff --git a/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java index 310256556c8..d92b87d3af4 100644 --- a/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.query; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.indices.cache.query.terms.TermsLookup; +import org.elasticsearch.indices.TermsLookup; import java.io.IOException; import java.util.ArrayList; diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 0f47eec6b25..954c2f8af4b 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -224,7 +224,7 @@ public class IndexShard extends AbstractIndexShardComponent { this.getService = new ShardGetService(indexSettings, this, mapperService); this.searchService = new ShardSearchStats(slowLog); this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings); - this.shardQueryCache = new ShardRequestCache(shardId, indexSettings); + this.shardQueryCache = new ShardRequestCache(); this.shardFieldData = new ShardFieldData(); this.indexFieldDataService = indexFieldDataService; this.shardBitsetFilterCache = new ShardBitsetFilterCache(shardId, indexSettings); diff --git a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java similarity index 99% rename from core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java rename to core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 718f4db9c4e..926ff482248 100644 --- a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.indices.cache.query; +package org.elasticsearch.indices; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java new file mode 100644 index 00000000000..575153c8ada --- /dev/null +++ b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -0,0 +1,337 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices; + +import com.carrotsearch.hppc.ObjectHashSet; +import com.carrotsearch.hppc.ObjectSet; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.cache.CacheLoader; +import org.elasticsearch.common.cache.RemovalListener; +import org.elasticsearch.common.cache.RemovalNotification; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; + +/** + * The indices request cache allows to cache a shard level request stage responses, helping with improving + * similar requests that are potentially expensive (because of aggs for example). The cache is fully coherent + * with the semantics of NRT (the index reader version is part of the cache key), and relies on size based + * eviction to evict old reader associated cache entries as well as scheduler reaper to clean readers that + * are no longer used or closed shards. + *

+ * Currently, the cache is only enabled for count requests, and can only be opted in on an index + * level setting that can be dynamically changed and defaults to false. + *

+ * There are still several TODOs left in this class, some easily addressable, some more complex, but the support + * is functional. + */ +public final class IndicesRequestCache extends AbstractComponent implements RemovalListener, Closeable { + + /** + * A setting to enable or disable request caching on an index level. Its dynamic by default + * since we are checking on the cluster state IndexMetaData always. + */ + public static final Setting INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable", + false, true, Setting.Scope.INDEX); + public static final Setting INDICES_CACHE_QUERY_SIZE = Setting.byteSizeSetting("indices.requests.cache.size", "1%", + false, Setting.Scope.CLUSTER); + public static final Setting INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting("indices.requests.cache.expire", + new TimeValue(0), false, Setting.Scope.CLUSTER); + + private final ConcurrentMap registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); + private final Set keysToClean = ConcurrentCollections.newConcurrentSet(); + private final ByteSizeValue size; + private final TimeValue expire; + private final Cache cache; + + IndicesRequestCache(Settings settings) { + super(settings); + this.size = INDICES_CACHE_QUERY_SIZE.get(settings); + this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null; + long sizeInBytes = size.bytes(); + CacheBuilder cacheBuilder = CacheBuilder.builder() + .setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this); + if (expire != null) { + cacheBuilder.setExpireAfterAccess(TimeUnit.MILLISECONDS.toNanos(expire.millis())); + } + cache = cacheBuilder.build(); + } + + @Override + public void close() { + cache.invalidateAll(); + } + + void clear(CacheEntity entity) { + keysToClean.add(new CleanupKey(entity, -1)); + cleanCache(); + } + + @Override + public void onRemoval(RemovalNotification notification) { + notification.getKey().entity.onRemoval(notification); + } + + BytesReference getOrCompute(CacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) throws Exception { + final Key key = new Key(cacheEntity, reader.getVersion(), cacheKey); + Loader loader = new Loader(cacheEntity); + Value value = cache.computeIfAbsent(key, loader); + if (loader.isLoaded()) { + key.entity.onMiss(); + // see if its the first time we see this reader, and make sure to register a cleanup key + CleanupKey cleanupKey = new CleanupKey(cacheEntity, reader.getVersion()); + if (!registeredClosedListeners.containsKey(cleanupKey)) { + Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE); + if (previous == null) { + ElasticsearchDirectoryReader.addReaderCloseListener(reader, cleanupKey); + } + } + } else { + key.entity.onHit(); + } + return value.reference; + } + + private static class Loader implements CacheLoader { + + private final CacheEntity entity; + private boolean loaded; + + Loader(CacheEntity entity) { + this.entity = entity; + } + + public boolean isLoaded() { + return this.loaded; + } + + @Override + public Value load(Key key) throws Exception { + Value value = entity.loadValue(); + entity.onCached(key, value); + loaded = true; + return value; + } + } + + /** + * Basic interface to make this cache testable. + */ + interface CacheEntity { + /** + * Loads the actual cache value. this is the heavy lifting part. 
+ */ + Value loadValue() throws IOException; + + /** + * Called after the value was loaded via {@link #loadValue()} + */ + void onCached(Key key, Value value); + + /** + * Returns true iff the resource behind this entity is still open ie. + * entities assiciated with it can remain in the cache. ie. IndexShard is still open. + */ + boolean isOpen(); + + /** + * Returns the cache identity. this is, similar to {@link #isOpen()} the resource identity behind this cache entity. + * For instance IndexShard is the identity while a CacheEntity is per DirectoryReader. Yet, we group by IndexShard instance. + */ + Object getCacheIdentity(); + + /** + * Called each time this entity has a cache hit. + */ + void onHit(); + + /** + * Called each time this entity has a cache miss. + */ + void onMiss(); + + /** + * Called when this entity instance is removed + */ + void onRemoval(RemovalNotification notification); + } + + + + static class Value implements Accountable { + final BytesReference reference; + final long ramBytesUsed; + + Value(BytesReference reference, long ramBytesUsed) { + this.reference = reference; + this.ramBytesUsed = ramBytesUsed; + } + + @Override + public long ramBytesUsed() { + return ramBytesUsed; + } + + @Override + public Collection getChildResources() { + return Collections.emptyList(); + } + } + + static class Key implements Accountable { + public final CacheEntity entity; // use as identity equality + public final long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped + public final BytesReference value; + + Key(CacheEntity entity, long readerVersion, BytesReference value) { + this.entity = entity; + this.readerVersion = readerVersion; + this.value = value; + } + + @Override + public long ramBytesUsed() { + return RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_LONG + value.length(); + } + + @Override + public Collection getChildResources() { + // TODO: more detailed ram usage? 
+ return Collections.emptyList(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + Key key = (Key) o; + if (readerVersion != key.readerVersion) return false; + if (!entity.getCacheIdentity().equals(key.entity.getCacheIdentity())) return false; + if (!value.equals(key.value)) return false; + return true; + } + + @Override + public int hashCode() { + int result = entity.getCacheIdentity().hashCode(); + result = 31 * result + Long.hashCode(readerVersion); + result = 31 * result + value.hashCode(); + return result; + } + } + + private class CleanupKey implements IndexReader.ReaderClosedListener { + final CacheEntity entity; + final long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped + + private CleanupKey(CacheEntity entity, long readerVersion) { + this.entity = entity; + this.readerVersion = readerVersion; + } + + @Override + public void onClose(IndexReader reader) { + Boolean remove = registeredClosedListeners.remove(this); + if (remove != null) { + keysToClean.add(this); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + CleanupKey that = (CleanupKey) o; + if (readerVersion != that.readerVersion) return false; + if (!entity.getCacheIdentity().equals(that.entity.getCacheIdentity())) return false; + return true; + } + + @Override + public int hashCode() { + int result = entity.getCacheIdentity().hashCode(); + result = 31 * result + Long.hashCode(readerVersion); + return result; + } + } + + + + synchronized void cleanCache() { + final ObjectSet currentKeysToClean = new ObjectHashSet<>(); + final ObjectSet currentFullClean = new ObjectHashSet<>(); + currentKeysToClean.clear(); + currentFullClean.clear(); + for (Iterator iterator = keysToClean.iterator(); iterator.hasNext(); ) { + CleanupKey cleanupKey = iterator.next(); + iterator.remove(); + if (cleanupKey.readerVersion == -1 || cleanupKey.entity.isOpen() == false) { + // -1 indicates full cleanup, as does a closed shard + currentFullClean.add(cleanupKey.entity.getCacheIdentity()); + } else { + currentKeysToClean.add(cleanupKey); + } + } + if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) { + for (Iterator iterator = cache.keys().iterator(); iterator.hasNext(); ) { + Key key = iterator.next(); + if (currentFullClean.contains(key.entity.getCacheIdentity())) { + iterator.remove(); + } else { + if (currentKeysToClean.contains(new CleanupKey(key.entity, key.readerVersion))) { + iterator.remove(); + } + } + } + } + + cache.refresh(); + } + + + /** + * Returns the current size of the cache + */ + final int count() { + return cache.count(); + } + + final int numRegisteredCloseListeners() { // for testing + return registeredClosedListeners.size(); + } +} diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 7b2bc89e646..c7d1be4bf71 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -19,8 +19,8 @@ package org.elasticsearch.indices; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.store.LockObtainFailedException; -import org.apache.lucene.util.Accountable; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; @@ -29,15 +29,19 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import 
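Because the cache only depends on the small CacheEntity seam defined above, a unit test can drive IndicesRequestCache without an IndexShard or SearchContext, which is the testability the commit message refers to. A rough sketch of a test-only entity, assuming it lives in the same package as the cache; the class name and canned-response behaviour are hypothetical, only the overridden signatures mirror the interface above:

    package org.elasticsearch.indices;

    import org.elasticsearch.common.bytes.BytesReference;
    import org.elasticsearch.common.cache.RemovalNotification;

    import java.io.IOException;

    /** Hypothetical test double; the signatures mirror the CacheEntity interface above. */
    final class TestCacheEntity implements IndicesRequestCache.CacheEntity {

        private final Object identity = new Object();   // stands in for the IndexShard instance
        private final BytesReference cannedResponse;
        private boolean open = true;

        TestCacheEntity(BytesReference cannedResponse) {
            this.cannedResponse = cannedResponse;
        }

        @Override
        public IndicesRequestCache.Value loadValue() throws IOException {
            // the real entity runs the query phase here; the test just wraps canned bytes
            return new IndicesRequestCache.Value(cannedResponse, cannedResponse.length());
        }

        @Override
        public void onCached(IndicesRequestCache.Key key, IndicesRequestCache.Value value) {
            // a real entity forwards to the shard's request-cache stats
        }

        @Override
        public boolean isOpen() {
            return open;  // flip to false to simulate a closed shard for cleanCache()
        }

        @Override
        public Object getCacheIdentity() {
            return identity;
        }

        @Override
        public void onHit() {
        }

        @Override
        public void onMiss() {
        }

        @Override
        public void onRemoval(RemovalNotification<IndicesRequestCache.Key, IndicesRequestCache.Value> notification) {
        }
    }

The production glue that implements the same interface on top of IndexShard and QueryPhase follows in IndicesService below.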
org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; @@ -56,6 +60,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.cache.request.ShardRequestCache; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.flush.FlushStats; @@ -67,21 +72,25 @@ import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.query.QueryPhase; +import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.file.Files; import java.util.ArrayList; +import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -106,7 +115,7 @@ import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; public class IndicesService extends AbstractLifecycleComponent implements Iterable, IndexService.ShardStoreDeleter { public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout"; - public static final Setting INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.fielddata.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER); + public static final Setting INDICES_CACHE_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.cache.cleanup_interval", 
TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER); private final PluginsService pluginsService; private final NodeEnvironment nodeEnv; private final TimeValue shardsClosedTimeout; @@ -116,7 +125,7 @@ public class IndicesService extends AbstractLifecycleComponent i private final IndexNameExpressionResolver indexNameExpressionResolver; private final IndexScopedSettings indexScopeSetting; private final IndicesFieldDataCache indicesFieldDataCache; - private final FieldDataCacheCleaner fieldDataCacheCleaner; + private final CacheCleaner cacheCleaner; private final ThreadPool threadPool; private final CircuitBreakerService circuitBreakerService; private volatile Map indices = emptyMap(); @@ -132,7 +141,7 @@ public class IndicesService extends AbstractLifecycleComponent i @Override protected void doStart() { // Start thread that will manage cleaning the field data cache periodically - threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME, this.fieldDataCacheCleaner); + threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME, this.cacheCleaner); } @Inject @@ -150,7 +159,7 @@ public class IndicesService extends AbstractLifecycleComponent i this.indicesQueriesRegistry = indicesQueriesRegistry; this.clusterService = clusterService; this.indexNameExpressionResolver = indexNameExpressionResolver; - this.indicesRequestCache = new IndicesRequestCache(settings, threadPool); + this.indicesRequestCache = new IndicesRequestCache(settings); this.indicesQueryCache = new IndicesQueryCache(settings); this.mapperRegistry = mapperRegistry; clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType); @@ -165,8 +174,8 @@ public class IndicesService extends AbstractLifecycleComponent i circuitBreakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(-sizeInBytes); } }); - this.cleanInterval = INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING.get(settings); - this.fieldDataCacheCleaner = new FieldDataCacheCleaner(indicesFieldDataCache, logger, threadPool, this.cleanInterval); + this.cleanInterval = INDICES_CACHE_CLEAN_INTERVAL_SETTING.get(settings); + this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, logger, threadPool, this.cleanInterval); } @@ -202,7 +211,7 @@ public class IndicesService extends AbstractLifecycleComponent i @Override protected void doClose() { - IOUtils.closeWhileHandlingException(analysisRegistry, indexingMemoryController, indicesFieldDataCache, fieldDataCacheCleaner, indicesRequestCache, indicesQueryCache); + IOUtils.closeWhileHandlingException(analysisRegistry, indexingMemoryController, indicesFieldDataCache, cacheCleaner, indicesRequestCache, indicesQueryCache); } /** @@ -433,10 +442,6 @@ public class IndicesService extends AbstractLifecycleComponent i return circuitBreakerService; } - public IndicesRequestCache getIndicesRequestCache() { - return indicesRequestCache; - } - public IndicesQueryCache getIndicesQueryCache() { return indicesQueryCache; } @@ -827,16 +832,18 @@ public class IndicesService extends AbstractLifecycleComponent i * has an entry invalidated may not clean up the entry if it is not read from * or written to after invalidation. 
*/ - private final static class FieldDataCacheCleaner implements Runnable, Releasable { + private final static class CacheCleaner implements Runnable, Releasable { private final IndicesFieldDataCache cache; private final ESLogger logger; private final ThreadPool threadPool; private final TimeValue interval; private final AtomicBoolean closed = new AtomicBoolean(false); + private final IndicesRequestCache requestCache; - public FieldDataCacheCleaner(IndicesFieldDataCache cache, ESLogger logger, ThreadPool threadPool, TimeValue interval) { + public CacheCleaner(IndicesFieldDataCache cache, IndicesRequestCache requestCache, ESLogger logger, ThreadPool threadPool, TimeValue interval) { this.cache = cache; + this.requestCache = requestCache; this.logger = logger; this.threadPool = threadPool; this.interval = interval; @@ -856,6 +863,12 @@ public class IndicesService extends AbstractLifecycleComponent i if (logger.isTraceEnabled()) { logger.trace("periodic field data cache cleanup finished in {} milliseconds", TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)); } + + try { + this.requestCache.cleanCache(); + } catch (Exception e) { + logger.warn("Exception during periodic request cache cleanup:", e); + } // Reschedule itself to run again if not closed if (closed.get() == false) { threadPool.schedule(interval, ThreadPool.Names.SAME, this); @@ -867,4 +880,148 @@ public class IndicesService extends AbstractLifecycleComponent i closed.compareAndSet(false, true); } } + + + private static final Set CACHEABLE_SEARCH_TYPES = EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH); + + /** + * Can the shard request be cached at all? + */ + public boolean canCache(ShardSearchRequest request, SearchContext context) { + if (request.template() != null) { + return false; + } + + // for now, only enable it for requests with no hits + if (context.size() != 0) { + return false; + } + + // We cannot cache with DFS because results depend not only on the content of the index but also + // on the overridden statistics. 
So if you ran two queries on the same index with different stats + // (because an other shard was updated) you would get wrong results because of the scores + // (think about top_hits aggs or scripts using the score) + if (!CACHEABLE_SEARCH_TYPES.contains(context.searchType())) { + return false; + } + IndexSettings settings = context.indexShard().getIndexSettings(); + // if not explicitly set in the request, use the index setting, if not, use the request + if (request.requestCache() == null) { + if (settings.getValue(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING) == false) { + return false; + } + } else if (request.requestCache() == false) { + return false; + } + // if the reader is not a directory reader, we can't get the version from it + if ((context.searcher().getIndexReader() instanceof DirectoryReader) == false) { + return false; + } + // if now in millis is used (or in the future, a more generic "isDeterministic" flag + // then we can't cache based on "now" key within the search request, as it is not deterministic + if (context.nowInMillisUsed()) { + return false; + } + return true; + + } + + public void clearRequestCache(IndexShard shard) { + if (shard == null) { + return; + } + indicesRequestCache.clear(new IndexShardCacheEntity(shard)); + logger.trace("{} explicit cache clear", shard.shardId()); + } + /** + * Loads the cache result, computing it if needed by executing the query phase and otherwise deserializing the cached + * value into the {@link SearchContext#queryResult() context's query result}. The combination of load + compute allows + * to have a single load operation that will cause other requests with the same key to wait till its loaded an reuse + * the same cache. + */ + public void loadIntoContext(ShardSearchRequest request, SearchContext context, QueryPhase queryPhase) throws Exception { + assert canCache(request, context); + final IndexShardCacheEntity entity = new IndexShardCacheEntity(context.indexShard(), queryPhase, context); + final DirectoryReader directoryReader = context.searcher().getDirectoryReader(); + final BytesReference bytesReference = indicesRequestCache.getOrCompute(entity, directoryReader, request.cacheKey()); + if (entity.loaded == false) { // if we have loaded this we don't need to do anything + // restore the cached query result into the context + final QuerySearchResult result = context.queryResult(); + result.readFromWithId(context.id(), bytesReference.streamInput()); + result.shardTarget(context.shardTarget()); + } + } + + static final class IndexShardCacheEntity implements IndicesRequestCache.CacheEntity { + private final QueryPhase queryPhase; + private final SearchContext context; + private final IndexShard indexShard; + private final ShardRequestCache requestCache; + private boolean loaded = false; + + IndexShardCacheEntity(IndexShard indexShard) { + this(indexShard, null, null); + } + + public IndexShardCacheEntity(IndexShard indexShard, QueryPhase queryPhase, SearchContext context) { + this.queryPhase = queryPhase; + this.context = context; + this.indexShard = indexShard; + this.requestCache = indexShard.requestCache(); + } + + @Override + public IndicesRequestCache.Value loadValue() throws IOException { + queryPhase.execute(context); + /* BytesStreamOutput allows to pass the expected size but by default uses + * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie. 
+ * a date histogram with 3 buckets is ~100byte so 16k might be very wasteful + * since we don't shrink to the actual size once we are done serializing. + * By passing 512 as the expected size we will resize the byte array in the stream + * slowly until we hit the page size and don't waste too much memory for small query + * results.*/ + final int expectedSizeInBytes = 512; + try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) { + context.queryResult().writeToNoId(out); + // for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep + // the memory properly paged instead of having varied sized bytes + final BytesReference reference = out.bytes(); + loaded = true; + return new IndicesRequestCache.Value(reference, out.ramBytesUsed()); + } + } + + @Override + public void onCached(IndicesRequestCache.Key key, IndicesRequestCache.Value value) { + requestCache.onCached(key, value); + } + + + @Override + public boolean isOpen() { + return indexShard.state() != IndexShardState.CLOSED; + } + + @Override + public Object getCacheIdentity() { + return indexShard; + } + + @Override + public void onHit() { + requestCache.onHit(); + } + + @Override + public void onMiss() { + requestCache.onMiss(); + } + + @Override + public void onRemoval(RemovalNotification notification) { + requestCache.onRemoval(notification.getKey(), notification.getValue(), notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED); + } + + } + } diff --git a/core/src/main/java/org/elasticsearch/indices/cache/query/terms/TermsLookup.java b/core/src/main/java/org/elasticsearch/indices/TermsLookup.java similarity index 95% rename from core/src/main/java/org/elasticsearch/indices/cache/query/terms/TermsLookup.java rename to core/src/main/java/org/elasticsearch/indices/TermsLookup.java index 62c0011312d..806181d0d4e 100644 --- a/core/src/main/java/org/elasticsearch/indices/cache/query/terms/TermsLookup.java +++ b/core/src/main/java/org/elasticsearch/indices/TermsLookup.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.indices.cache.query.terms; +package org.elasticsearch.indices; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -123,11 +123,12 @@ public class TermsLookup implements Writeable, ToXContent { path = parser.text(); break; default: - throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] query does not support [" + currentFieldName - + "] within lookup element"); + throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + + "] query does not support [" + currentFieldName + "] within lookup element"); } } else { - throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); + throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] unknown token [" + + token + "] after [" + currentFieldName + "]"); } } return new TermsLookup(index, type, id, path).routing(routing); diff --git a/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java deleted file mode 100644 index d58c1c13994..00000000000 --- a/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java +++ /dev/null @@ -1,443 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.indices.cache.request; - -import com.carrotsearch.hppc.ObjectHashSet; -import com.carrotsearch.hppc.ObjectSet; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.util.Accountable; -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.cache.Cache; -import org.elasticsearch.common.cache.CacheBuilder; -import org.elasticsearch.common.cache.CacheLoader; -import org.elasticsearch.common.cache.RemovalListener; -import org.elasticsearch.common.cache.RemovalNotification; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.MemorySizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardState; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.ShardSearchRequest; -import org.elasticsearch.search.query.QueryPhase; -import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.threadpool.ThreadPool; - -import java.io.Closeable; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumSet; -import java.util.Iterator; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; - -/** - * The indices request cache allows to cache a shard level request stage responses, helping with improving - * similar requests that are potentially expensive (because of aggs for example). The cache is fully coherent - * with the semantics of NRT (the index reader version is part of the cache key), and relies on size based - * eviction to evict old reader associated cache entries as well as scheduler reaper to clean readers that - * are no longer used or closed shards. - *
- * Currently, the cache is only enabled for count requests, and can only be opted in on an index - * level setting that can be dynamically changed and defaults to false. - *
- * There are still several TODOs left in this class, some easily addressable, some more complex, but the support - * is functional. - */ -public class IndicesRequestCache extends AbstractComponent implements RemovalListener, Closeable { - - /** - * A setting to enable or disable request caching on an index level. Its dynamic by default - * since we are checking on the cluster state IndexMetaData always. - */ - public static final Setting INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable", false, true, Setting.Scope.INDEX); - public static final Setting INDICES_CACHE_REQUEST_CLEAN_INTERVAL = Setting.positiveTimeSetting("indices.requests.cache.clean_interval", TimeValue.timeValueSeconds(60), false, Setting.Scope.CLUSTER); - - public static final Setting INDICES_CACHE_QUERY_SIZE = Setting.byteSizeSetting("indices.requests.cache.size", "1%", false, Setting.Scope.CLUSTER); - public static final Setting INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting("indices.requests.cache.expire", new TimeValue(0), false, Setting.Scope.CLUSTER); - - private static final Set CACHEABLE_SEARCH_TYPES = EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH); - - private final ThreadPool threadPool; - - private final TimeValue cleanInterval; - private final Reaper reaper; - - final ConcurrentMap registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); - final Set keysToClean = ConcurrentCollections.newConcurrentSet(); - - - //TODO make these changes configurable on the cluster level - private final ByteSizeValue size; - private final TimeValue expire; - - private volatile Cache cache; - - public IndicesRequestCache(Settings settings, ThreadPool threadPool) { - super(settings); - this.threadPool = threadPool; - this.cleanInterval = INDICES_CACHE_REQUEST_CLEAN_INTERVAL.get(settings); - this.size = INDICES_CACHE_QUERY_SIZE.get(settings); - this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null; - buildCache(); - this.reaper = new Reaper(); - threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper); - } - - - private void buildCache() { - long sizeInBytes = size.bytes(); - CacheBuilder cacheBuilder = CacheBuilder.builder() - .setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this); - - if (expire != null) { - cacheBuilder.setExpireAfterAccess(TimeUnit.MILLISECONDS.toNanos(expire.millis())); - } - - cache = cacheBuilder.build(); - } - - @Override - public void close() { - reaper.close(); - cache.invalidateAll(); - } - - public void clear(IndexShard shard) { - if (shard == null) { - return; - } - keysToClean.add(new CleanupKey(shard, -1)); - logger.trace("{} explicit cache clear", shard.shardId()); - reaper.reap(); - } - - @Override - public void onRemoval(RemovalNotification notification) { - notification.getKey().shard.requestCache().onRemoval(notification); - } - - /** - * Can the shard request be cached at all? - */ - public boolean canCache(ShardSearchRequest request, SearchContext context) { - if (request.template() != null) { - return false; - } - - // for now, only enable it for requests with no hits - if (context.size() != 0) { - return false; - } - - // We cannot cache with DFS because results depend not only on the content of the index but also - // on the overridden statistics. 
So if you ran two queries on the same index with different stats - // (because an other shard was updated) you would get wrong results because of the scores - // (think about top_hits aggs or scripts using the score) - if (!CACHEABLE_SEARCH_TYPES.contains(context.searchType())) { - return false; - } - IndexSettings settings = context.indexShard().getIndexSettings(); - // if not explicitly set in the request, use the index setting, if not, use the request - if (request.requestCache() == null) { - if (settings.getValue(INDEX_CACHE_REQUEST_ENABLED_SETTING) == false) { - return false; - } - } else if (request.requestCache() == false) { - return false; - } - // if the reader is not a directory reader, we can't get the version from it - if ((context.searcher().getIndexReader() instanceof DirectoryReader) == false) { - return false; - } - // if now in millis is used (or in the future, a more generic "isDeterministic" flag - // then we can't cache based on "now" key within the search request, as it is not deterministic - if (context.nowInMillisUsed()) { - return false; - } - return true; - } - - /** - * Loads the cache result, computing it if needed by executing the query phase and otherwise deserializing the cached - * value into the {@link SearchContext#queryResult() context's query result}. The combination of load + compute allows - * to have a single load operation that will cause other requests with the same key to wait till its loaded an reuse - * the same cache. - */ - public void loadIntoContext(final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase) throws Exception { - assert canCache(request, context); - Key key = buildKey(request, context); - Loader loader = new Loader(queryPhase, context); - Value value = cache.computeIfAbsent(key, loader); - if (loader.isLoaded()) { - key.shard.requestCache().onMiss(); - // see if its the first time we see this reader, and make sure to register a cleanup key - CleanupKey cleanupKey = new CleanupKey(context.indexShard(), ((DirectoryReader) context.searcher().getIndexReader()).getVersion()); - if (!registeredClosedListeners.containsKey(cleanupKey)) { - Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE); - if (previous == null) { - ElasticsearchDirectoryReader.addReaderCloseListener(context.searcher().getDirectoryReader(), cleanupKey); - } - } - } else { - key.shard.requestCache().onHit(); - // restore the cached query result into the context - final QuerySearchResult result = context.queryResult(); - result.readFromWithId(context.id(), value.reference.streamInput()); - result.shardTarget(context.shardTarget()); - } - } - - private static class Loader implements CacheLoader { - - private final QueryPhase queryPhase; - private final SearchContext context; - private boolean loaded; - - Loader(QueryPhase queryPhase, SearchContext context) { - this.queryPhase = queryPhase; - this.context = context; - } - - public boolean isLoaded() { - return this.loaded; - } - - @Override - public Value load(Key key) throws Exception { - queryPhase.execute(context); - - /* BytesStreamOutput allows to pass the expected size but by default uses - * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie. - * a date histogram with 3 buckets is ~100byte so 16k might be very wasteful - * since we don't shrink to the actual size once we are done serializing. 
- * By passing 512 as the expected size we will resize the byte array in the stream - * slowly until we hit the page size and don't waste too much memory for small query - * results.*/ - final int expectedSizeInBytes = 512; - try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) { - context.queryResult().writeToNoId(out); - // for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep - // the memory properly paged instead of having varied sized bytes - final BytesReference reference = out.bytes(); - loaded = true; - Value value = new Value(reference, out.ramBytesUsed()); - key.shard.requestCache().onCached(key, value); - return value; - } - } - } - - public static class Value implements Accountable { - final BytesReference reference; - final long ramBytesUsed; - - public Value(BytesReference reference, long ramBytesUsed) { - this.reference = reference; - this.ramBytesUsed = ramBytesUsed; - } - - @Override - public long ramBytesUsed() { - return ramBytesUsed; - } - - @Override - public Collection getChildResources() { - return Collections.emptyList(); - } - } - - public static class Key implements Accountable { - public final IndexShard shard; // use as identity equality - public final long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped - public final BytesReference value; - - Key(IndexShard shard, long readerVersion, BytesReference value) { - this.shard = shard; - this.readerVersion = readerVersion; - this.value = value; - } - - @Override - public long ramBytesUsed() { - return RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_LONG + value.length(); - } - - @Override - public Collection getChildResources() { - // TODO: more detailed ram usage? 
- return Collections.emptyList(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - Key key = (Key) o; - if (readerVersion != key.readerVersion) return false; - if (!shard.equals(key.shard)) return false; - if (!value.equals(key.value)) return false; - return true; - } - - @Override - public int hashCode() { - int result = shard.hashCode(); - result = 31 * result + Long.hashCode(readerVersion); - result = 31 * result + value.hashCode(); - return result; - } - } - - private class CleanupKey implements IndexReader.ReaderClosedListener { - IndexShard indexShard; - long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped - - private CleanupKey(IndexShard indexShard, long readerVersion) { - this.indexShard = indexShard; - this.readerVersion = readerVersion; - } - - @Override - public void onClose(IndexReader reader) { - Boolean remove = registeredClosedListeners.remove(this); - if (remove != null) { - keysToClean.add(this); - } - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - CleanupKey that = (CleanupKey) o; - if (readerVersion != that.readerVersion) return false; - if (!indexShard.equals(that.indexShard)) return false; - return true; - } - - @Override - public int hashCode() { - int result = indexShard.hashCode(); - result = 31 * result + Long.hashCode(readerVersion); - return result; - } - } - - private class Reaper implements Runnable { - - private final ObjectSet currentKeysToClean = new ObjectHashSet<>(); - private final ObjectSet currentFullClean = new ObjectHashSet<>(); - - private volatile boolean closed; - - void close() { - closed = true; - } - - @Override - public void run() { - if (closed) { - return; - } - if (keysToClean.isEmpty()) { - schedule(); - return; - } - try { - threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() { - @Override - public void run() { - reap(); - schedule(); - } - }); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not run ReaderCleaner - execution rejected", ex); - } - } - - private void schedule() { - try { - threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, this); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not schedule ReaderCleaner - execution rejected", ex); - } - } - - synchronized void reap() { - currentKeysToClean.clear(); - currentFullClean.clear(); - for (Iterator iterator = keysToClean.iterator(); iterator.hasNext(); ) { - CleanupKey cleanupKey = iterator.next(); - iterator.remove(); - if (cleanupKey.readerVersion == -1 || cleanupKey.indexShard.state() == IndexShardState.CLOSED) { - // -1 indicates full cleanup, as does a closed shard - currentFullClean.add(cleanupKey.indexShard); - } else { - currentKeysToClean.add(cleanupKey); - } - } - - if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) { - CleanupKey lookupKey = new CleanupKey(null, -1); - for (Iterator iterator = cache.keys().iterator(); iterator.hasNext(); ) { - Key key = iterator.next(); - if (currentFullClean.contains(key.shard)) { - iterator.remove(); - } else { - lookupKey.indexShard = key.shard; - lookupKey.readerVersion = key.readerVersion; - if (currentKeysToClean.contains(lookupKey)) { - iterator.remove(); - } - } - } - } - - cache.refresh(); - currentKeysToClean.clear(); - currentFullClean.clear(); - } - } - - private static Key buildKey(ShardSearchRequest request, SearchContext context) throws Exception { - // TODO: for now, this will create different keys for different 
JSON order - // TODO: tricky to get around this, need to parse and order all, which can be expensive - return new Key(context.indexShard(), - ((DirectoryReader) context.searcher().getIndexReader()).getVersion(), - request.cacheKey()); - } -} diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 4afef7117f5..3f62066cd4c 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -54,7 +54,7 @@ import org.elasticsearch.index.search.stats.StatsGroupsParseElement; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; +import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; @@ -131,8 +131,6 @@ public class SearchService extends AbstractLifecycleComponent imp private final FetchPhase fetchPhase; - private final IndicesRequestCache indicesQueryCache; - private final long defaultKeepAlive; private volatile TimeValue defaultSearchTimeout; @@ -161,7 +159,6 @@ public class SearchService extends AbstractLifecycleComponent imp this.dfsPhase = dfsPhase; this.queryPhase = queryPhase; this.fetchPhase = fetchPhase; - this.indicesQueryCache = indicesService.getIndicesRequestCache(); TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings); this.defaultKeepAlive = DEFAULT_KEEPALIVE_SETTING.get(settings).millis(); @@ -250,9 +247,9 @@ public class SearchService extends AbstractLifecycleComponent imp */ private void loadOrExecuteQueryPhase(final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase) throws Exception { - final boolean canCache = indicesQueryCache.canCache(request, context); + final boolean canCache = indicesService.canCache(request, context); if (canCache) { - indicesQueryCache.loadIntoContext(request, context, queryPhase); + indicesService.loadIntoContext(request, context, queryPhase); } else { queryPhase.execute(context); } diff --git a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 55c0c85a889..9e0c3776bf1 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -55,9 +55,8 @@ import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; -import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.script.ScriptContextRegistry; diff --git a/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java index f7dc3b2d8e1..72241330289 100644 --- 
a/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.get.GetResult; -import org.elasticsearch.indices.cache.query.terms.TermsLookup; +import org.elasticsearch.indices.TermsLookup; import org.hamcrest.Matchers; import org.junit.Before; diff --git a/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java similarity index 99% rename from core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java rename to core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java index ff5dc9ad708..d6e248f1c94 100644 --- a/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.indices.cache.query; +package org.elasticsearch.indices; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; @@ -35,9 +35,9 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.Index; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.test.ESTestCase; import java.io.IOException; diff --git a/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesRequestCacheIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java similarity index 90% rename from core/src/test/java/org/elasticsearch/indices/cache/query/IndicesRequestCacheIT.java rename to core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index f09e033648c..125969fc978 100644 --- a/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesRequestCacheIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -17,11 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.indices.cache.query; +package org.elasticsearch.indices; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; +import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -50,15 +50,18 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { // which used to not work well with the query cache because of the handles stream output // see #9500 final SearchResponse r1 = client().prepareSearch("index").setSize(0).setSearchType(SearchType.QUERY_THEN_FETCH) - .addAggregation(dateHistogram("histo").field("f").timeZone("+01:00").minDocCount(0).interval(DateHistogramInterval.MONTH)).get(); + .addAggregation(dateHistogram("histo").field("f").timeZone("+01:00") + .minDocCount(0).interval(DateHistogramInterval.MONTH)).get(); assertSearchResponse(r1); // The cached is actually used - assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache() + .getMemorySizeInBytes(), greaterThan(0L)); for (int i = 0; i < 10; ++i) { final SearchResponse r2 = client().prepareSearch("index").setSize(0).setSearchType(SearchType.QUERY_THEN_FETCH) - .addAggregation(dateHistogram("histo").field("f").timeZone("+01:00").minDocCount(0).interval(DateHistogramInterval.MONTH)).get(); + .addAggregation(dateHistogram("histo").field("f").timeZone("+01:00").minDocCount(0) + .interval(DateHistogramInterval.MONTH)).get(); assertSearchResponse(r2); Histogram h1 = r1.getAggregations().get("histo"); Histogram h2 = r2.getAggregations().get("histo"); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java new file mode 100644 index 00000000000..f87516bc295 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -0,0 +1,366 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.cache.RemovalNotification; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.cache.request.ShardRequestCache; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.atomic.AtomicBoolean; + +public class IndicesRequestCacheTests extends ESTestCase { + + public void testBasicOperationsCache() throws Exception { + ShardRequestCache requestCacheStats = new ShardRequestCache(); + IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + AtomicBoolean indexShard = new AtomicBoolean(true); + TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); + + // initial cache + BytesReference value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + assertEquals("foo", value.toUtf8()); + assertEquals(0, requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertEquals(1, entity.loaded); + assertEquals(1, cache.count()); + + // cache hit + value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + assertEquals("foo", value.toUtf8()); + assertEquals(1, requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertEquals(1, entity.loaded); + assertEquals(1, cache.count()); + assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length()); + assertEquals(1, cache.numRegisteredCloseListeners()); + + // release + if (randomBoolean()) { + reader.close(); + } else { + indexShard.set(false); // closed shard but reader is still open + cache.clear(entity); + } + cache.cleanCache(); + assertEquals(1, requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertEquals(1, entity.loaded); + assertEquals(0, cache.count()); + assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); + + IOUtils.close(reader, writer, dir, cache); + assertEquals(0, cache.numRegisteredCloseListeners()); + } + + public void testCacheWithDifferentEntityInstance() throws Exception { + IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); + 
AtomicBoolean indexShard = new AtomicBoolean(true); + ShardRequestCache requestCacheStats = new ShardRequestCache(); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); + + // initial cache + BytesReference value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + assertEquals("foo", value.toUtf8()); + assertEquals(0, requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertEquals(1, entity.loaded); + assertEquals(1, cache.count()); + assertEquals(1, cache.numRegisteredCloseListeners()); + final int cacheSize = requestCacheStats.stats().getMemorySize().bytesAsInt(); + + value = cache.getOrCompute(new TestEntity(requestCacheStats, reader, indexShard, 0), reader, termQuery.buildAsBytes()); + assertEquals("foo", value.toUtf8()); + assertEquals(1, requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertEquals(1, entity.loaded); + assertEquals(1, cache.count()); + assertEquals(cacheSize, requestCacheStats.stats().getMemorySize().bytesAsInt()); + + assertEquals(1, cache.numRegisteredCloseListeners()); + IOUtils.close(reader, writer, dir, cache); + } + + public void testCacheDifferentReaders() throws Exception { + IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); + AtomicBoolean indexShard = new AtomicBoolean(true); + ShardRequestCache requestCacheStats = new ShardRequestCache(); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); + + writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); + DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); + + // initial cache + BytesReference value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + assertEquals("foo", value.toUtf8()); + assertEquals(0, requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertEquals(1, entity.loaded); + assertEquals(1, cache.count()); + assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length()); + final int cacheSize = requestCacheStats.stats().getMemorySize().bytesAsInt(); + assertEquals(1, cache.numRegisteredCloseListeners()); + + // cache the second + value = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + assertEquals("bar", value.toUtf8()); + assertEquals(0, requestCacheStats.stats().getHitCount()); + assertEquals(2, requestCacheStats.stats().getMissCount()); + assertEquals(0, 
requestCacheStats.stats().getEvictions()); + assertEquals(1, entity.loaded); + assertEquals(1, secondEntity.loaded); + assertEquals(2, cache.count()); + assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > cacheSize + value.length()); + assertEquals(2, cache.numRegisteredCloseListeners()); + + + + value = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + assertEquals("bar", value.toUtf8()); + assertEquals(1, requestCacheStats.stats().getHitCount()); + assertEquals(2, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertEquals(1, entity.loaded); + assertEquals(1, secondEntity.loaded); + assertEquals(2, cache.count()); + + value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + assertEquals("foo", value.toUtf8()); + assertEquals(2, requestCacheStats.stats().getHitCount()); + assertEquals(2, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertEquals(1, entity.loaded); + assertEquals(1, secondEntity.loaded); + assertEquals(2, cache.count()); + + reader.close(); + cache.cleanCache(); + assertEquals(2, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertEquals(1, entity.loaded); + assertEquals(1, secondEntity.loaded); + assertEquals(1, cache.count()); + assertEquals(cacheSize, requestCacheStats.stats().getMemorySize().bytesAsInt()); + assertEquals(1, cache.numRegisteredCloseListeners()); + + + // release + if (randomBoolean()) { + secondReader.close(); + } else { + indexShard.set(false); // closed shard but reader is still open + cache.clear(secondEntity); + } + cache.cleanCache(); + assertEquals(2, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertEquals(1, entity.loaded); + assertEquals(1, secondEntity.loaded); + assertEquals(0, cache.count()); + assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); + + IOUtils.close(secondReader, writer, dir, cache); + assertEquals(0, cache.numRegisteredCloseListeners()); + + } + + public void testEviction() throws Exception { + IndicesRequestCache cache = new IndicesRequestCache(Settings.builder() + .put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), "113b") // the first 2 cache entries add up to 112b + .build()); + AtomicBoolean indexShard = new AtomicBoolean(true); + ShardRequestCache requestCacheStats = new ShardRequestCache(); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); + + writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); + DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); + + writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); + DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + TestEntity thirddEntity = new TestEntity(requestCacheStats, thirdReader, indexShard, 0); + + BytesReference value1 = 
cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + assertEquals("foo", value1.toUtf8()); + BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + assertEquals("bar", value2.toUtf8()); + logger.info(requestCacheStats.stats().getMemorySize().toString()); + BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); + assertEquals("baz", value3.toUtf8()); + assertEquals(2, cache.count()); + assertEquals(1, requestCacheStats.stats().getEvictions()); + IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache); + } + + public void testClearAllEntityIdentity() throws Exception { + IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); + AtomicBoolean indexShard = new AtomicBoolean(true); + + ShardRequestCache requestCacheStats = new ShardRequestCache(); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); + + writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); + DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); + + writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); + DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + AtomicBoolean differentIdentity = new AtomicBoolean(true); + TestEntity thirddEntity = new TestEntity(requestCacheStats, thirdReader, differentIdentity, 0); + + BytesReference value1 = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + assertEquals("foo", value1.toUtf8()); + BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + assertEquals("bar", value2.toUtf8()); + logger.info(requestCacheStats.stats().getMemorySize().toString()); + BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); + assertEquals("baz", value3.toUtf8()); + assertEquals(3, cache.count()); + final long hitCount = requestCacheStats.stats().getHitCount(); + // clear all for the indexShard Idendity even though is't still open + cache.clear(randomFrom(entity, secondEntity)); + cache.cleanCache(); + assertEquals(1, cache.count()); + // third has not been validated since it's a different identity + value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); + assertEquals(hitCount + 1, requestCacheStats.stats().getHitCount()); + assertEquals("baz", value3.toUtf8()); + + + IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache); + + } + + public Iterable newDoc(int id, String value) { + return Arrays.asList(newField("id", Integer.toString(id), StringField.TYPE_STORED), newField("value", value, + StringField.TYPE_STORED)); + } + + private class TestEntity implements IndicesRequestCache.CacheEntity { + private final DirectoryReader reader; + private final int id; + private final AtomicBoolean identity; + private final ShardRequestCache shardRequestCache; + private int loaded; + private TestEntity(ShardRequestCache shardRequestCache, 
DirectoryReader reader, AtomicBoolean identity, int id) { + this.reader = reader; + this.id = id; + this.identity = identity; + this.shardRequestCache = shardRequestCache; + } + + @Override + public IndicesRequestCache.Value loadValue() throws IOException { + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(new TermQuery(new Term("id", Integer.toString(this.id))), 1); + assertEquals(1, topDocs.totalHits); + Document document = reader.document(topDocs.scoreDocs[0].doc); + BytesArray value = new BytesArray(document.get("value")); + loaded++; + return new IndicesRequestCache.Value(value, value.length()); + } + + @Override + public void onCached(IndicesRequestCache.Key key, IndicesRequestCache.Value value) { + shardRequestCache.onCached(key, value); + } + + @Override + public boolean isOpen() { + return identity.get(); + } + + @Override + public Object getCacheIdentity() { + return identity; + } + + @Override + public void onHit() { + shardRequestCache.onHit(); + } + + @Override + public void onMiss() { + shardRequestCache.onMiss(); + } + + @Override + public void onRemoval(RemovalNotification notification) { + shardRequestCache.onRemoval(notification.getKey(), notification.getValue(), + notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/indices/cache/query/terms/TermsLookupTests.java b/core/src/test/java/org/elasticsearch/indices/TermsLookupTests.java similarity index 97% rename from core/src/test/java/org/elasticsearch/indices/cache/query/terms/TermsLookupTests.java rename to core/src/test/java/org/elasticsearch/indices/TermsLookupTests.java index bf0394988b4..d711402f566 100644 --- a/core/src/test/java/org/elasticsearch/indices/cache/query/terms/TermsLookupTests.java +++ b/core/src/test/java/org/elasticsearch/indices/TermsLookupTests.java @@ -17,10 +17,11 @@ * under the License. */ -package org.elasticsearch.indices.cache.query.terms; +package org.elasticsearch.indices; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.indices.TermsLookup; import org.elasticsearch.test.ESTestCase; import java.io.IOException; diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 38e81f7eba2..1d6c54f643b 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -48,7 +48,8 @@ import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.MergeSchedulerConfig; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -78,7 +79,7 @@ public class IndexStatsIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { //Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. 
Thread.sleep for 60s is bad return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) - .put(IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL.getKey(), "1ms") + .put(IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), "1ms") .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true) .put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), IndexModule.INDEX_QUERY_CACHE) .build(); diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index faa6b62d2ad..f1f84040d54 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.query; import org.apache.lucene.util.English; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -29,9 +28,7 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.MultiMatchQueryBuilder; @@ -42,7 +39,7 @@ import org.elasticsearch.index.query.WrapperQueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; import org.elasticsearch.index.search.MatchQuery; import org.elasticsearch.index.search.MatchQuery.Type; -import org.elasticsearch.indices.cache.query.terms.TermsLookup; +import org.elasticsearch.indices.TermsLookup; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -86,7 +83,6 @@ import static org.elasticsearch.index.query.QueryBuilders.termsQuery; import static org.elasticsearch.index.query.QueryBuilders.typeQuery; import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery; -import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java index e2ff218a942..451f844344f 100644 --- a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java @@ -46,7 +46,7 @@ import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermsQueryBuilder; -import org.elasticsearch.indices.cache.query.terms.TermsLookup; +import org.elasticsearch.indices.TermsLookup; 
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.test.ESIntegTestCase; diff --git a/docs/reference/migration/migrate_3_0.asciidoc b/docs/reference/migration/migrate_3_0.asciidoc index 58d1eae2611..2ce6378eb85 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -273,6 +273,12 @@ now doesn't accept a value less than `100ms` which prevents fsyncing too often i The deprecated settings `index.cache.query.enable` and `indices.cache.query.size` have been removed and are replaced with `index.requests.cache.enable` and `indices.requests.cache.size` respectively. +`indices.requests.cache.clean_interval` has been replaced with `indices.cache.clean_interval` and is no longer supported. + +==== Field Data Cache Settings + +`indices.fielddata.cache.clean_interval` has been replaced with `indices.cache.clean_interval` and is no longer supported. + ==== Allocation settings Allocation settings deprecated in 1.x have been removed: diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index bb03cf8ba47..a3161f4090f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -108,7 +108,7 @@ import org.elasticsearch.index.MergeSchedulerConfig; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.index.IndexWarmer; -import org.elasticsearch.indices.cache.request.IndicesRequestCache; +import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.node.NodeMocksPlugin; import org.elasticsearch.plugins.Plugin; From c50586599ea0041dfb3a31bab39910d8943e3abf Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 22 Jan 2016 13:00:50 +0100 Subject: [PATCH 12/22] Make security non-optional 2.x has show so far that running with security manager is the way to go. This commit make this non-optional. Users that need to pass their own rules can still do this via the system configuration for the security manager. They can even opt out of all security that way. --- .../elasticsearch/bootstrap/Bootstrap.java | 10 +-------- .../bootstrap/BootstrapSettings.java | 8 ------- .../common/settings/ClusterSettings.java | 1 - .../bootstrap/BootstrapSettingsTests.java | 1 - docs/reference/migration/migrate_3_0.asciidoc | 8 +++++++ .../modules/scripting/security.asciidoc | 21 +------------------ 6 files changed, 10 insertions(+), 39 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 70e9ca85ee7..004b0541eee 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -175,7 +175,7 @@ final class Bootstrap { JarHell.checkJarHell(); // install SM after natives, shutdown hooks, etc. 
- setupSecurity(settings, environment); + Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings)); // We do not need to reload system properties here as we have already applied them in building the settings and // reloading could cause multiple prompts to the user for values if a system property was specified with a prompt @@ -188,14 +188,6 @@ final class Bootstrap { node = new Node(nodeSettings); } - - - private void setupSecurity(Settings settings, Environment environment) throws Exception { - if (BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING.get(settings)) { - Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings)); - } - } - @SuppressForbidden(reason = "Exception#printStackTrace()") private static void setupLogging(Settings settings, Environment environment) { try { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java index 9122504f0e8..a20ff9bb059 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java @@ -27,14 +27,6 @@ public final class BootstrapSettings { private BootstrapSettings() { } - // TODO: remove this: http://www.openbsd.org/papers/hackfest2015-pledge/mgp00005.jpg - /** - * option to turn off our security manager completely, for example - * if you want to have your own configuration or just disable - */ - public static final Setting SECURITY_MANAGER_ENABLED_SETTING = - Setting.boolSetting("security.manager.enabled", true, false, Scope.CLUSTER); - // TODO: remove this hack when insecure defaults are removed from java public static final Setting SECURITY_FILTER_BAD_DEFAULTS_SETTING = Setting.boolSetting("security.manager.filter_bad_defaults", true, false, Scope.CLUSTER); diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 142fbacfb01..87d2882155f 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -395,7 +395,6 @@ public final class ClusterSettings extends AbstractScopedSettings { PageCacheRecycler.WEIGHT_OBJECTS_SETTING, PageCacheRecycler.TYPE_SETTING, PluginsService.MANDATORY_SETTING, - BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING, BootstrapSettings.MLOCKALL_SETTING, BootstrapSettings.SECCOMP_SETTING, diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapSettingsTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapSettingsTests.java index 0570a69f0b9..c032d3ddee8 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapSettingsTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.test.ESTestCase; public class BootstrapSettingsTests extends ESTestCase { public void testDefaultSettings() { - assertTrue(BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING.get(Settings.EMPTY)); assertTrue(BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(Settings.EMPTY)); assertFalse(BootstrapSettings.MLOCKALL_SETTING.get(Settings.EMPTY)); assertTrue(BootstrapSettings.SECCOMP_SETTING.get(Settings.EMPTY)); diff --git a/docs/reference/migration/migrate_3_0.asciidoc 
b/docs/reference/migration/migrate_3_0.asciidoc index 58d1eae2611..398f17e4146 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -20,6 +20,7 @@ your application to Elasticsearch 3.0. * <> * <> * <> +* <> [[breaking_30_search_changes]] === Warmers @@ -767,3 +768,10 @@ The term vectors APIs no longer persist unmapped fields in the mappings. The `dfs` parameter has been removed completely, term vectors don't support distributed document frequencies anymore. + +[[breaking_30_security]] +=== Security + +The option to disable the security manager `--security.manager.enabled` has been removed. In order to grant special +permissions to elasticsearch users must tweak the local Java Security Policy. + diff --git a/docs/reference/modules/scripting/security.asciidoc b/docs/reference/modules/scripting/security.asciidoc index 2761fb02ad9..af193b35103 100644 --- a/docs/reference/modules/scripting/security.asciidoc +++ b/docs/reference/modules/scripting/security.asciidoc @@ -82,7 +82,7 @@ Returns the following exception: [float] == Dealing with Java Security Manager issues -If you encounter issues with the Java Security Manager, you have three options +If you encounter issues with the Java Security Manager, you have two options for resolving these issues: [float] @@ -92,25 +92,6 @@ The safest and most secure long term solution is to change the code causing the security issue. We recognise that this may take time to do correctly and so we provide the following two alternatives. -[float] -=== Disable the Java Security Manager - -deprecated[2.2.0,The ability to disable the Java Security Manager will be removed in a future version] - -You can disable the Java Security Manager entirely with the -`security.manager.enabled` command line flag: - -[source,sh] ------------------------------ -./bin/elasticsearch --security.manager.enabled false ------------------------------ - -WARNING: This disables the Security Manager entirely and makes Elasticsearch -much more vulnerable to attacks! It is an option that should only be used in -the most urgent of situations and for the shortest amount of time possible. -Optional security is not secure at all because it **will** be disabled and -leave the system vulnerable. This option will be removed in a future version. - [float] === Customising the classloader whitelist From a1e251af20f97593f3197a0416444371d2f9ccf0 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 11 Feb 2016 16:23:16 +0100 Subject: [PATCH 13/22] Remove the MapperBuilders utility class. We can just call constructors directly. 
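For illustration, this is the shape of the change at call sites. The two lines
are taken from the DocumentParser hunks below and are representative rather
than exhaustive:

    // before: go through the static factory on MapperBuilders
    builder = MapperBuilders.stringField(currentFieldName);

    // after: call the mapper's builder constructor directly
    builder = new StringFieldMapper.Builder(currentFieldName);
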
--- .../index/mapper/DocumentMapperParser.java | 3 +- .../index/mapper/DocumentParser.java | 40 ++++--- .../index/mapper/MapperBuilders.java | 110 ------------------ .../index/mapper/core/BinaryFieldMapper.java | 3 +- .../index/mapper/core/BooleanFieldMapper.java | 3 +- .../index/mapper/core/ByteFieldMapper.java | 3 +- .../mapper/core/CompletionFieldMapper.java | 3 +- .../index/mapper/core/DateFieldMapper.java | 3 +- .../index/mapper/core/DoubleFieldMapper.java | 3 +- .../index/mapper/core/FloatFieldMapper.java | 3 +- .../index/mapper/core/IntegerFieldMapper.java | 3 +- .../index/mapper/core/LongFieldMapper.java | 3 +- .../index/mapper/core/ShortFieldMapper.java | 3 +- .../index/mapper/core/StringFieldMapper.java | 3 +- .../mapper/core/TokenCountFieldMapper.java | 3 +- .../mapper/geo/BaseGeoPointFieldMapper.java | 9 +- .../index/mapper/geo/GeoShapeFieldMapper.java | 4 +- .../mapper/internal/ParentFieldMapper.java | 3 +- .../index/mapper/ip/IpFieldMapper.java | 3 +- .../index/mapper/object/ObjectMapper.java | 3 +- .../percolator/PercolatorFieldMapper.java | 3 +- .../index/query/QueryShardContext.java | 3 +- .../index/engine/InternalEngineTests.java | 4 +- .../fielddata/AbstractFieldDataTestCase.java | 25 ++-- .../fielddata/IndexFieldDataServiceTests.java | 7 +- .../mapper/externalvalues/ExternalMapper.java | 3 +- .../mapper/multifield/MultiFieldTests.java | 12 +- .../mapper/simple/SimpleMapperTests.java | 19 ++- .../highlight/HighlightBuilderTests.java | 3 +- .../rescore/QueryRescoreBuilderTests.java | 3 +- .../phrase/DirectCandidateGeneratorTests.java | 3 +- .../mapper/attachments/AttachmentMapper.java | 24 ++-- 32 files changed, 99 insertions(+), 221 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/index/mapper/MapperBuilders.java diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java index 20aeb34bd39..b1f6f7cd9bd 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -43,7 +43,6 @@ import java.util.Map; import java.util.function.Supplier; import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.index.mapper.MapperBuilders.doc; public class DocumentMapperParser { @@ -111,7 +110,7 @@ public class DocumentMapperParser { Mapper.TypeParser.ParserContext parserContext = parserContext(type); // parse RootObjectMapper - DocumentMapper.Builder docBuilder = doc((RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService); + DocumentMapper.Builder docBuilder = new DocumentMapper.Builder((RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService); Iterator> iterator = mapping.entrySet().iterator(); // parse DocumentMapper while(iterator.hasNext()) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index c1362287d67..ac7240495bd 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -28,7 +28,15 @@ import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.IndexSettings; +import 
org.elasticsearch.index.mapper.core.BinaryFieldMapper; +import org.elasticsearch.index.mapper.core.BooleanFieldMapper; +import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.elasticsearch.index.mapper.core.DateFieldMapper.DateFieldType; +import org.elasticsearch.index.mapper.core.DoubleFieldMapper; +import org.elasticsearch.index.mapper.core.FloatFieldMapper; +import org.elasticsearch.index.mapper.core.IntegerFieldMapper; +import org.elasticsearch.index.mapper.core.LongFieldMapper; +import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper.StringFieldType; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; @@ -323,7 +331,7 @@ class DocumentParser implements Closeable { context.path().remove(); Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object"); if (builder == null) { - builder = MapperBuilders.object(currentFieldName).enabled(true); + builder = new ObjectMapper.Builder(currentFieldName).enabled(true); // if this is a non root object, then explicitly set the dynamic behavior if set if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) { ((ObjectMapper.Builder) builder).dynamic(mapper.dynamic()); @@ -442,37 +450,37 @@ class DocumentParser implements Closeable { if (fieldType instanceof StringFieldType) { builder = context.root().findTemplateBuilder(context, currentFieldName, "string"); if (builder == null) { - builder = MapperBuilders.stringField(currentFieldName); + builder = new StringFieldMapper.Builder(currentFieldName); } } else if (fieldType instanceof DateFieldType) { builder = context.root().findTemplateBuilder(context, currentFieldName, "date"); if (builder == null) { - builder = MapperBuilders.dateField(currentFieldName); + builder = new DateFieldMapper.Builder(currentFieldName); } } else if (fieldType.numericType() != null) { switch (fieldType.numericType()) { case LONG: builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); if (builder == null) { - builder = MapperBuilders.longField(currentFieldName); + builder = new LongFieldMapper.Builder(currentFieldName); } break; case DOUBLE: builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); if (builder == null) { - builder = MapperBuilders.doubleField(currentFieldName); + builder = new DoubleFieldMapper.Builder(currentFieldName); } break; case INT: builder = context.root().findTemplateBuilder(context, currentFieldName, "integer"); if (builder == null) { - builder = MapperBuilders.integerField(currentFieldName); + builder = new IntegerFieldMapper.Builder(currentFieldName); } break; case FLOAT: builder = context.root().findTemplateBuilder(context, currentFieldName, "float"); if (builder == null) { - builder = MapperBuilders.floatField(currentFieldName); + builder = new FloatFieldMapper.Builder(currentFieldName); } break; default: @@ -503,7 +511,7 @@ class DocumentParser implements Closeable { dateTimeFormatter.parser().parseMillis(text); Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "date"); if (builder == null) { - builder = MapperBuilders.dateField(currentFieldName).dateTimeFormatter(dateTimeFormatter); + builder = new DateFieldMapper.Builder(currentFieldName).dateTimeFormatter(dateTimeFormatter); } return builder; } catch (Exception e) { @@ -518,7 +526,7 @@ class DocumentParser implements Closeable { 
Long.parseLong(text); Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); if (builder == null) { - builder = MapperBuilders.longField(currentFieldName); + builder = new LongFieldMapper.Builder(currentFieldName); } return builder; } catch (NumberFormatException e) { @@ -528,7 +536,7 @@ class DocumentParser implements Closeable { Double.parseDouble(text); Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); if (builder == null) { - builder = MapperBuilders.doubleField(currentFieldName); + builder = new DoubleFieldMapper.Builder(currentFieldName); } return builder; } catch (NumberFormatException e) { @@ -537,7 +545,7 @@ class DocumentParser implements Closeable { } Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string"); if (builder == null) { - builder = MapperBuilders.stringField(currentFieldName); + builder = new StringFieldMapper.Builder(currentFieldName); } return builder; } else if (token == XContentParser.Token.VALUE_NUMBER) { @@ -545,7 +553,7 @@ class DocumentParser implements Closeable { if (numberType == XContentParser.NumberType.INT || numberType == XContentParser.NumberType.LONG) { Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); if (builder == null) { - builder = MapperBuilders.longField(currentFieldName); + builder = new LongFieldMapper.Builder(currentFieldName); } return builder; } else if (numberType == XContentParser.NumberType.FLOAT || numberType == XContentParser.NumberType.DOUBLE) { @@ -554,20 +562,20 @@ class DocumentParser implements Closeable { // no templates are defined, we use float by default instead of double // since this is much more space-efficient and should be enough most of // the time - builder = MapperBuilders.floatField(currentFieldName); + builder = new FloatFieldMapper.Builder(currentFieldName); } return builder; } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "boolean"); if (builder == null) { - builder = MapperBuilders.booleanField(currentFieldName); + builder = new BooleanFieldMapper.Builder(currentFieldName); } return builder; } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "binary"); if (builder == null) { - builder = MapperBuilders.binaryField(currentFieldName); + builder = new BinaryFieldMapper.Builder(currentFieldName); } return builder; } else { @@ -677,7 +685,7 @@ class DocumentParser implements Closeable { if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) { ((ObjectMapper.Builder) builder).dynamic(parent.dynamic()); } - builder = MapperBuilders.object(paths[i]).enabled(true); + builder = new ObjectMapper.Builder(paths[i]).enabled(true); } Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); mapper = (ObjectMapper) builder.build(builderContext); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperBuilders.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperBuilders.java deleted file mode 100644 index 9ea9e99f01b..00000000000 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperBuilders.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.index.mapper.core.BinaryFieldMapper; -import org.elasticsearch.index.mapper.core.BooleanFieldMapper; -import org.elasticsearch.index.mapper.core.ByteFieldMapper; -import org.elasticsearch.index.mapper.core.CompletionFieldMapper; -import org.elasticsearch.index.mapper.core.DateFieldMapper; -import org.elasticsearch.index.mapper.core.DoubleFieldMapper; -import org.elasticsearch.index.mapper.core.FloatFieldMapper; -import org.elasticsearch.index.mapper.core.IntegerFieldMapper; -import org.elasticsearch.index.mapper.core.LongFieldMapper; -import org.elasticsearch.index.mapper.core.ShortFieldMapper; -import org.elasticsearch.index.mapper.core.StringFieldMapper; -import org.elasticsearch.index.mapper.core.TokenCountFieldMapper; -import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper; -import org.elasticsearch.index.mapper.ip.IpFieldMapper; -import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.mapper.object.RootObjectMapper; - -public final class MapperBuilders { - - private MapperBuilders() {} - - public static DocumentMapper.Builder doc(RootObjectMapper.Builder objectBuilder, MapperService mapperService) { - return new DocumentMapper.Builder(objectBuilder, mapperService); - } - - public static RootObjectMapper.Builder rootObject(String name) { - return new RootObjectMapper.Builder(name); - } - - public static ObjectMapper.Builder object(String name) { - return new ObjectMapper.Builder(name); - } - - public static BooleanFieldMapper.Builder booleanField(String name) { - return new BooleanFieldMapper.Builder(name); - } - - public static StringFieldMapper.Builder stringField(String name) { - return new StringFieldMapper.Builder(name); - } - - public static BinaryFieldMapper.Builder binaryField(String name) { - return new BinaryFieldMapper.Builder(name); - } - - public static DateFieldMapper.Builder dateField(String name) { - return new DateFieldMapper.Builder(name); - } - - public static IpFieldMapper.Builder ipField(String name) { - return new IpFieldMapper.Builder(name); - } - - public static ShortFieldMapper.Builder shortField(String name) { - return new ShortFieldMapper.Builder(name); - } - - public static ByteFieldMapper.Builder byteField(String name) { - return new ByteFieldMapper.Builder(name); - } - - public static IntegerFieldMapper.Builder integerField(String name) { - return new IntegerFieldMapper.Builder(name); - } - - public static TokenCountFieldMapper.Builder tokenCountField(String name) { - return new TokenCountFieldMapper.Builder(name); - } - - public static LongFieldMapper.Builder longField(String name) { - return new LongFieldMapper.Builder(name); - } - - public static FloatFieldMapper.Builder floatField(String name) { - return new FloatFieldMapper.Builder(name); 
- } - - public static DoubleFieldMapper.Builder doubleField(String name) { - return new DoubleFieldMapper.Builder(name); - } - - public static GeoShapeFieldMapper.Builder geoShapeField(String name) { - return new GeoShapeFieldMapper.Builder(name); - } - - public static CompletionFieldMapper.Builder completionField(String name) { - return new CompletionFieldMapper.Builder(name); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java index f71267fa75b..9fc12a3dd0c 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java @@ -42,7 +42,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -import static org.elasticsearch.index.mapper.MapperBuilders.binaryField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseField; /** @@ -79,7 +78,7 @@ public class BinaryFieldMapper extends FieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - BinaryFieldMapper.Builder builder = binaryField(name); + BinaryFieldMapper.Builder builder = new BinaryFieldMapper.Builder(name); parseField(builder, name, node, parserContext); return builder; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java index 29d2ce2176f..4b49d644d5f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java @@ -41,7 +41,6 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; -import static org.elasticsearch.index.mapper.MapperBuilders.booleanField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; @@ -96,7 +95,7 @@ public class BooleanFieldMapper extends FieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - BooleanFieldMapper.Builder builder = booleanField(name); + BooleanFieldMapper.Builder builder = new BooleanFieldMapper.Builder(name); parseField(builder, name, node, parserContext); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index b1553d455d7..91019f0aa3f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -49,7 +49,6 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeByteValue; -import static org.elasticsearch.index.mapper.MapperBuilders.byteField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; /** @@ -97,7 +96,7 @@ public class ByteFieldMapper extends NumberFieldMapper { public static 
class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - ByteFieldMapper.Builder builder = byteField(name); + ByteFieldMapper.Builder builder = new ByteFieldMapper.Builder(name); parseNumberField(builder, name, node, parserContext); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index 1e45780cf18..057a8957121 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -58,7 +58,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -import static org.elasticsearch.index.mapper.MapperBuilders.completionField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; /** @@ -119,7 +118,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - CompletionFieldMapper.Builder builder = completionField(name); + CompletionFieldMapper.Builder builder = new CompletionFieldMapper.Builder(name); NamedAnalyzer indexAnalyzer = null; NamedAnalyzer searchAnalyzer = null; for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index 4b752b2b2af..5a355e2f3d4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -63,7 +63,6 @@ import java.util.Objects; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.index.mapper.MapperBuilders.dateField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseDateTimeFormatter; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; @@ -153,7 +152,7 @@ public class DateFieldMapper extends NumberFieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - DateFieldMapper.Builder builder = dateField(name); + DateFieldMapper.Builder builder = new DateFieldMapper.Builder(name); parseNumberField(builder, name, node, parserContext); boolean configuredFormat = false; for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index 6c3f24d479e..50ce90aa6c3 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -51,7 +51,6 @@ import java.util.Map; import static org.apache.lucene.util.NumericUtils.doubleToSortableLong; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeDoubleValue; -import static org.elasticsearch.index.mapper.MapperBuilders.doubleField; 
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; /** @@ -98,7 +97,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - DoubleFieldMapper.Builder builder = doubleField(name); + DoubleFieldMapper.Builder builder = new DoubleFieldMapper.Builder(name); parseNumberField(builder, name, node, parserContext); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index 31c22d626ee..c30e307758c 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -52,7 +52,6 @@ import java.util.Map; import static org.apache.lucene.util.NumericUtils.floatToSortableInt; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeFloatValue; -import static org.elasticsearch.index.mapper.MapperBuilders.floatField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; /** @@ -99,7 +98,7 @@ public class FloatFieldMapper extends NumberFieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - FloatFieldMapper.Builder builder = floatField(name); + FloatFieldMapper.Builder builder = new FloatFieldMapper.Builder(name); parseNumberField(builder, name, node, parserContext); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index 27315ad042a..e37b715278f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -51,7 +51,6 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue; -import static org.elasticsearch.index.mapper.MapperBuilders.integerField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; /** @@ -104,7 +103,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - IntegerFieldMapper.Builder builder = integerField(name); + IntegerFieldMapper.Builder builder = new IntegerFieldMapper.Builder(name); parseNumberField(builder, name, node, parserContext); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index 984dc3fed53..51375a6903d 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ 
b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -51,7 +51,6 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeLongValue; -import static org.elasticsearch.index.mapper.MapperBuilders.longField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; /** @@ -103,7 +102,7 @@ public class LongFieldMapper extends NumberFieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - LongFieldMapper.Builder builder = longField(name); + LongFieldMapper.Builder builder = new LongFieldMapper.Builder(name); parseNumberField(builder, name, node, parserContext); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index 01bd16d2c1e..2eead1a124b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -51,7 +51,6 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeShortValue; -import static org.elasticsearch.index.mapper.MapperBuilders.shortField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; /** @@ -101,7 +100,7 @@ public class ShortFieldMapper extends NumberFieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - ShortFieldMapper.Builder builder = shortField(name); + ShortFieldMapper.Builder builder = new ShortFieldMapper.Builder(name); parseNumberField(builder, name, node, parserContext); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java index 52739e0b6f8..6812fa520aa 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java @@ -45,7 +45,6 @@ import java.util.List; import java.util.Map; import static org.apache.lucene.index.IndexOptions.NONE; -import static org.elasticsearch.index.mapper.MapperBuilders.stringField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseTextField; @@ -146,7 +145,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String fieldName, Map node, ParserContext parserContext) throws MapperParsingException { - StringFieldMapper.Builder builder = stringField(fieldName); + StringFieldMapper.Builder builder = new StringFieldMapper.Builder(fieldName); // hack for the fact that string can't just accept true/false for // the index property and still accepts no/not_analyzed/analyzed final Object index = node.remove("index"); diff --git 
a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java index 85df5ea3d3b..4e850176199 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java @@ -43,7 +43,6 @@ import java.util.Map; import static org.apache.lucene.index.IndexOptions.NONE; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue; -import static org.elasticsearch.index.mapper.MapperBuilders.tokenCountField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; /** @@ -98,7 +97,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper { @Override @SuppressWarnings("unchecked") public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - TokenCountFieldMapper.Builder builder = tokenCountField(name); + TokenCountFieldMapper.Builder builder = new TokenCountFieldMapper.Builder(name); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); String propName = Strings.toUnderscoreCase(entry.getKey()); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java index 0a992aeb27a..426e64ceb12 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper; +import org.elasticsearch.index.mapper.core.TokenCountFieldMapper; import org.elasticsearch.index.mapper.object.ArrayValueMapperParser; import java.io.IOException; @@ -50,8 +51,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import static org.elasticsearch.index.mapper.MapperBuilders.doubleField; -import static org.elasticsearch.index.mapper.MapperBuilders.stringField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; @@ -159,8 +158,8 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr context.path().add(name); if (enableLatLon) { - NumberFieldMapper.Builder latMapperBuilder = doubleField(Names.LAT).includeInAll(false); - NumberFieldMapper.Builder lonMapperBuilder = doubleField(Names.LON).includeInAll(false); + NumberFieldMapper.Builder latMapperBuilder = new DoubleFieldMapper.Builder(Names.LAT).includeInAll(false); + NumberFieldMapper.Builder lonMapperBuilder = new DoubleFieldMapper.Builder(Names.LON).includeInAll(false); if (precisionStep != null) { latMapperBuilder.precisionStep(precisionStep); lonMapperBuilder.precisionStep(precisionStep); @@ -172,7 +171,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr StringFieldMapper geoHashMapper = null; if (enableGeoHash || enableGeoHashPrefix) { // TODO: possible also implicitly enable geohash if geohash precision is set - geoHashMapper = stringField(Names.GEOHASH).index(true).tokenized(false).includeInAll(false).store(fieldType.stored()) + 
geoHashMapper = new StringFieldMapper.Builder(Names.GEOHASH).index(true).tokenized(false).includeInAll(false).store(fieldType.stored()) .omitNorms(true).indexOptions(IndexOptions.DOCS).build(context); geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index c98744bb759..bf699afa514 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -45,6 +45,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import java.io.IOException; import java.util.Iterator; @@ -53,7 +54,6 @@ import java.util.Map; import java.util.Objects; import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; -import static org.elasticsearch.index.mapper.MapperBuilders.geoShapeField; /** @@ -160,7 +160,7 @@ public class GeoShapeFieldMapper extends FieldMapper { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - Builder builder = geoShapeField(name); + Builder builder = new Builder(name); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); String fieldName = Strings.toUnderscoreCase(entry.getKey()); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index e7cd1b107ae..e5b0d0caaf2 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; @@ -140,7 +139,7 @@ public class ParentFieldMapper extends MetadataFieldMapper { } static StringFieldMapper createParentJoinFieldMapper(String docType, BuilderContext context) { - StringFieldMapper.Builder parentJoinField = MapperBuilders.stringField(joinField(docType)); + StringFieldMapper.Builder parentJoinField = new StringFieldMapper.Builder(joinField(docType)); parentJoinField.indexOptions(IndexOptions.NONE); parentJoinField.docValues(true); parentJoinField.fieldType().setDocValuesType(DocValuesType.SORTED); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index fc9660d5c1d..9b8630040ff 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -57,7 +57,6 @@ import java.util.List; import java.util.Map; import java.util.regex.Pattern; -import 
static org.elasticsearch.index.mapper.MapperBuilders.ipField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; /** @@ -139,7 +138,7 @@ public class IpFieldMapper extends NumberFieldMapper { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - IpFieldMapper.Builder builder = ipField(name); + IpFieldMapper.Builder builder = new Builder(name); parseNumberField(builder, name, node, parserContext); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index b5934a40116..31dc34e4208 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -50,7 +50,6 @@ import java.util.Locale; import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; -import static org.elasticsearch.index.mapper.MapperBuilders.object; /** * @@ -300,7 +299,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, } protected Builder createBuilder(String name) { - return object(name); + return new Builder(name); } } diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java index 9a103195746..21082805f22 100644 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.StringFieldMapper; @@ -68,7 +67,7 @@ public class PercolatorFieldMapper extends FieldMapper { } static StringFieldMapper.Builder createStringFieldBuilder(String name) { - StringFieldMapper.Builder queryMetaDataFieldBuilder = MapperBuilders.stringField(name); + StringFieldMapper.Builder queryMetaDataFieldBuilder = new StringFieldMapper.Builder(name); queryMetaDataFieldBuilder.docValues(false); queryMetaDataFieldBuilder.store(false); queryMetaDataFieldBuilder.tokenized(false); diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index 4701cdfeecc..3aa5f25004d 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.core.StringFieldMapper; import 
org.elasticsearch.index.mapper.object.ObjectMapper; @@ -278,7 +277,7 @@ public class QueryShardContext { if (fieldMapping != null || allowUnmappedFields) { return fieldMapping; } else if (mapUnmappedFieldAsString) { - StringFieldMapper.Builder builder = MapperBuilders.stringField(name); + StringFieldMapper.Builder builder = new StringFieldMapper.Builder(name); return builder.build(new Mapper.BuilderContext(indexSettings.getSettings(), new ContentPath(1))).fieldType(); } else { throw new QueryShardException(this, "No field mapping can be found for the field with name [{}]", name); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index cd39806e222..61f18eecf87 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -73,11 +73,11 @@ import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.Mapper.BuilderContext; -import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext.Document; +import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.internal.SourceFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; @@ -1687,7 +1687,7 @@ public class InternalEngineTests extends ESTestCase { private Mapping dynamicUpdate() { BuilderContext context = new BuilderContext(Settings.EMPTY, new ContentPath()); - final RootObjectMapper root = MapperBuilders.rootObject("some_type").build(context); + final RootObjectMapper root = new RootObjectMapper.Builder("some_type").build(context); return new Mapping(Version.CURRENT, root, new MetadataFieldMapper[0], emptyMap()); } diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 6b3f0ddbd3f..2371247be47 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -42,7 +42,14 @@ import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper.BuilderContext; -import org.elasticsearch.index.mapper.MapperBuilders; +import org.elasticsearch.index.mapper.core.BinaryFieldMapper; +import org.elasticsearch.index.mapper.core.ByteFieldMapper; +import org.elasticsearch.index.mapper.core.DoubleFieldMapper; +import org.elasticsearch.index.mapper.core.FloatFieldMapper; +import org.elasticsearch.index.mapper.core.IntegerFieldMapper; +import org.elasticsearch.index.mapper.core.LongFieldMapper; +import org.elasticsearch.index.mapper.core.ShortFieldMapper; +import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; import 
org.elasticsearch.index.mapper.geo.GeoPointFieldMapperLegacy; @@ -97,19 +104,19 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase { final MappedFieldType fieldType; final BuilderContext context = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1)); if (type.getType().equals("string")) { - fieldType = MapperBuilders.stringField(fieldName).tokenized(false).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); + fieldType = new StringFieldMapper.Builder(fieldName).tokenized(false).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); } else if (type.getType().equals("float")) { - fieldType = MapperBuilders.floatField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); + fieldType = new FloatFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); } else if (type.getType().equals("double")) { - fieldType = MapperBuilders.doubleField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); + fieldType = new DoubleFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); } else if (type.getType().equals("long")) { - fieldType = MapperBuilders.longField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); + fieldType = new LongFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); } else if (type.getType().equals("int")) { - fieldType = MapperBuilders.integerField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); + fieldType = new IntegerFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); } else if (type.getType().equals("short")) { - fieldType = MapperBuilders.shortField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); + fieldType = new ShortFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); } else if (type.getType().equals("byte")) { - fieldType = MapperBuilders.byteField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); + fieldType = new ByteFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); } else if (type.getType().equals("geo_point")) { if (indexService.getIndexSettings().getIndexVersionCreated().before(Version.V_2_2_0)) { fieldType = new GeoPointFieldMapperLegacy.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); @@ -119,7 +126,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase { } else if (type.getType().equals("_parent")) { fieldType = new ParentFieldMapper.Builder("_type").type(fieldName).build(context).fieldType(); } else if (type.getType().equals("binary")) { - fieldType = MapperBuilders.binaryField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); + fieldType = new BinaryFieldMapper.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType(); } else { throw new UnsupportedOperationException(type.getType()); } diff --git 
a/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java index 0a2a3c4d79c..d84de2174a2 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper.BuilderContext; -import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.core.BooleanFieldMapper; import org.elasticsearch.index.mapper.core.ByteFieldMapper; import org.elasticsearch.index.mapper.core.DoubleFieldMapper; @@ -104,7 +103,7 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase { final IndexService indexService = createIndex("test"); final IndexFieldDataService ifdService = indexService.fieldData(); final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1)); - final MappedFieldType mapper1 = MapperBuilders.stringField("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx).fieldType(); + final MappedFieldType mapper1 = new StringFieldMapper.Builder("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx).fieldType(); final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer())); Document doc = new Document(); doc.add(new StringField("s", "thisisastring", Store.NO)); @@ -121,7 +120,7 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase { // write new segment writer.addDocument(doc); final IndexReader reader2 = DirectoryReader.open(writer, true); - final MappedFieldType mapper2 = MapperBuilders.stringField("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "doc_values").build()).build(ctx).fieldType(); + final MappedFieldType mapper2 = new StringFieldMapper.Builder("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "doc_values").build()).build(ctx).fieldType(); ifd = ifdService.getForField(mapper2); assertThat(ifd, instanceOf(SortedSetDVOrdinalsIndexFieldData.class)); reader1.close(); @@ -138,7 +137,7 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase { indicesService.getIndicesFieldDataCache(), indicesService.getCircuitBreakerService(), indexService.mapperService()); final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1)); - final MappedFieldType mapper1 = MapperBuilders.stringField("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx).fieldType(); + final MappedFieldType mapper1 = new StringFieldMapper.Builder("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx).fieldType(); final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer())); Document doc = new Document(); doc.add(new StringField("s", "thisisastring", 
Store.NO)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java index c4b04000eb6..356a2c815fc 100755 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java @@ -48,7 +48,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import static org.elasticsearch.index.mapper.MapperBuilders.stringField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; @@ -82,7 +81,7 @@ public class ExternalMapper extends FieldMapper { public Builder(String name, String generatedValue, String mapperName) { super(name, new ExternalFieldType(), new ExternalFieldType()); this.builder = this; - this.stringBuilder = stringField(name).store(false); + this.stringBuilder = new StringFieldMapper.Builder(name).store(false); this.generatedValue = generatedValue; this.mapperName = mapperName; } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java index 06374a931de..82bd78b4967 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.core.TokenCountFieldMapper; import org.elasticsearch.index.mapper.geo.BaseGeoPointFieldMapper; +import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.VersionUtils; @@ -54,9 +55,6 @@ import java.util.Map; import java.util.TreeMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.mapper.MapperBuilders.doc; -import static org.elasticsearch.index.mapper.MapperBuilders.rootObject; -import static org.elasticsearch.index.mapper.MapperBuilders.stringField; import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.hamcrest.Matchers.equalTo; @@ -147,10 +145,10 @@ public class MultiFieldTests extends ESSingleNodeTestCase { public void testBuildThenParse() throws Exception { IndexService indexService = createIndex("test"); - DocumentMapper builderDocMapper = doc(rootObject("person").add( - stringField("name").store(true) - .addMultiField(stringField("indexed").index(true).tokenized(true)) - .addMultiField(stringField("not_indexed").index(false).store(true)) + DocumentMapper builderDocMapper = new DocumentMapper.Builder(new RootObjectMapper.Builder("person").add( + new StringFieldMapper.Builder("name").store(true) + .addMultiField(new StringFieldMapper.Builder("indexed").index(true).tokenized(true)) + .addMultiField(new StringFieldMapper.Builder("not_indexed").index(false).store(true)) ), indexService.mapperService()).build(indexService.mapperService()); String builtMapping = builderDocMapper.mappingSource().string(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java 
b/core/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java index ed9792fb44e..9280e5a297a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java @@ -28,15 +28,14 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext.Document; +import org.elasticsearch.index.mapper.core.StringFieldMapper; +import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.test.ESSingleNodeTestCase; import java.nio.charset.StandardCharsets; -import static org.elasticsearch.index.mapper.MapperBuilders.doc; -import static org.elasticsearch.index.mapper.MapperBuilders.object; -import static org.elasticsearch.index.mapper.MapperBuilders.rootObject; -import static org.elasticsearch.index.mapper.MapperBuilders.stringField; import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.hamcrest.Matchers.equalTo; @@ -47,9 +46,9 @@ import static org.hamcrest.Matchers.equalTo; public class SimpleMapperTests extends ESSingleNodeTestCase { public void testSimpleMapper() throws Exception { IndexService indexService = createIndex("test"); - DocumentMapper docMapper = doc( - rootObject("person") - .add(object("name").add(stringField("first").store(true).index(false))), + DocumentMapper docMapper = new DocumentMapper.Builder( + new RootObjectMapper.Builder("person") + .add(new ObjectMapper.Builder("name").add(new StringFieldMapper.Builder("first").store(true).index(false))), indexService.mapperService()).build(indexService.mapperService()); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json")); @@ -107,9 +106,9 @@ public class SimpleMapperTests extends ESSingleNodeTestCase { public void testNoDocumentSent() throws Exception { IndexService indexService = createIndex("test"); - DocumentMapper docMapper = doc( - rootObject("person") - .add(object("name").add(stringField("first").store(true).index(false))), + DocumentMapper docMapper = new DocumentMapper.Builder( + new RootObjectMapper.Builder("person") + .add(new ObjectMapper.Builder("name").add(new StringFieldMapper.Builder("first").store(true).index(false))), indexService.mapperService()).build(indexService.mapperService()); BytesReference json = new BytesArray("".getBytes(StandardCharsets.UTF_8)); diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java index 5dc8528c00a..92a0a7a6fde 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.query.IdsQueryBuilder; import 
org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -281,7 +280,7 @@ public class HighlightBuilderTests extends ESTestCase { QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, null, indicesQueriesRegistry) { @Override public MappedFieldType fieldMapper(String name) { - StringFieldMapper.Builder builder = MapperBuilders.stringField(name); + StringFieldMapper.Builder builder = new StringFieldMapper.Builder(name); return builder.build(new Mapper.BuilderContext(idxSettings.getSettings(), new ContentPath(1))).fieldType(); } }; diff --git a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java index 01f7e332446..a792a04dcae 100644 --- a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -163,7 +162,7 @@ public class QueryRescoreBuilderTests extends ESTestCase { QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, null, indicesQueriesRegistry) { @Override public MappedFieldType fieldMapper(String name) { - StringFieldMapper.Builder builder = MapperBuilders.stringField(name); + StringFieldMapper.Builder builder = new StringFieldMapper.Builder(name); return builder.build(new Mapper.BuilderContext(idxSettings.getSettings(), new ContentPath(1))).fieldType(); } }; diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java index 02826b9a7eb..9da0ac2e47a 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java @@ -37,7 +37,6 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper.StringFieldType; @@ -174,7 +173,7 @@ public class DirectCandidateGeneratorTests extends ESTestCase{ QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, mockMapperService, null, null, null) { @Override public MappedFieldType fieldMapper(String name) { - StringFieldMapper.Builder builder = MapperBuilders.stringField(name); + StringFieldMapper.Builder builder = new StringFieldMapper.Builder(name); return builder.build(new Mapper.BuilderContext(idxSettings.getSettings(), new ContentPath(1))).fieldType(); } }; diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java 
b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java index 082adb958ec..58e93c9dd64 100644 --- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java +++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java @@ -37,6 +37,9 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.core.DateFieldMapper; +import org.elasticsearch.index.mapper.core.IntegerFieldMapper; +import org.elasticsearch.index.mapper.core.StringFieldMapper; import java.io.IOException; import java.util.Arrays; @@ -44,9 +47,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import static org.elasticsearch.index.mapper.MapperBuilders.dateField; -import static org.elasticsearch.index.mapper.MapperBuilders.integerField; -import static org.elasticsearch.index.mapper.MapperBuilders.stringField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; /** @@ -130,26 +130,26 @@ public class AttachmentMapper extends FieldMapper { private Mapper.Builder contentBuilder; - private Mapper.Builder titleBuilder = stringField(FieldNames.TITLE); + private Mapper.Builder titleBuilder = new StringFieldMapper.Builder(FieldNames.TITLE); - private Mapper.Builder nameBuilder = stringField(FieldNames.NAME); + private Mapper.Builder nameBuilder = new StringFieldMapper.Builder(FieldNames.NAME); - private Mapper.Builder authorBuilder = stringField(FieldNames.AUTHOR); + private Mapper.Builder authorBuilder = new StringFieldMapper.Builder(FieldNames.AUTHOR); - private Mapper.Builder keywordsBuilder = stringField(FieldNames.KEYWORDS); + private Mapper.Builder keywordsBuilder = new StringFieldMapper.Builder(FieldNames.KEYWORDS); - private Mapper.Builder dateBuilder = dateField(FieldNames.DATE); + private Mapper.Builder dateBuilder = new DateFieldMapper.Builder(FieldNames.DATE); - private Mapper.Builder contentTypeBuilder = stringField(FieldNames.CONTENT_TYPE); + private Mapper.Builder contentTypeBuilder = new StringFieldMapper.Builder(FieldNames.CONTENT_TYPE); - private Mapper.Builder contentLengthBuilder = integerField(FieldNames.CONTENT_LENGTH); + private Mapper.Builder contentLengthBuilder = new IntegerFieldMapper.Builder(FieldNames.CONTENT_LENGTH); - private Mapper.Builder languageBuilder = stringField(FieldNames.LANGUAGE); + private Mapper.Builder languageBuilder = new StringFieldMapper.Builder(FieldNames.LANGUAGE); public Builder(String name) { super(name, new AttachmentFieldType(), new AttachmentFieldType()); this.builder = this; - this.contentBuilder = stringField(FieldNames.CONTENT); + this.contentBuilder = new StringFieldMapper.Builder(FieldNames.CONTENT); } public Builder content(Mapper.Builder content) { From bc47c577d2d4192a2dd248e774d9e5028125a0bf Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 27 Jan 2016 15:02:41 +0100 Subject: [PATCH 14/22] Add a new `keyword` field. The `keyword` field is intended to replace `not_analyzed` string fields. It is indexed and has doc values by default, and doesn't support enabling term vectors. Although it doesn't support setting an analyzer for now, there are plans for it to support basic normalization in the future such as case folding. 
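As a usage illustration only (not part of this patch), a mapping exercising the new type might look like the following, written in the same jsonBuilder style as the KeywordFieldMapperTests added below; the field name "city" and the ignore_above / null_value settings are hypothetical choices, not defaults of the new type:

    import java.io.IOException;
    import org.elasticsearch.common.xcontent.XContentFactory;

    // Sketch of a mapping that uses the new "keyword" field type.
    static String keywordMappingExample() throws IOException {
        return XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("properties")
                    .startObject("city")
                        .field("type", "keyword")       // indexed as-is, not tokenized, doc values on by default
                        .field("ignore_above", 256)     // longer values are silently skipped at index time
                        .field("null_value", "unknown") // indexed in place of an explicit null value
                    .endObject()
                .endObject()
                .endObject().endObject().string();
    }

Per the Builder and TypeParser added in this patch, only null_value, ignore_above, multi-fields and the generic field settings are accepted, and index_options beyond docs and frequencies are rejected because the field does not support positions.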
--- .../fielddata/IndexFieldDataService.java | 6 + .../index/mapper/core/KeywordFieldMapper.java | 274 ++++++++++++++++++ .../mapper/internal/ParentFieldMapper.java | 12 +- .../percolator/PercolatorFieldMapper.java | 17 +- .../index/termvectors/TermVectorsService.java | 4 +- .../elasticsearch/indices/IndicesModule.java | 2 + .../indices/stats/IndicesStatsTests.java | 10 +- .../action/termvectors/GetTermVectorsIT.java | 2 +- .../cluster/SimpleClusterStateIT.java | 2 +- .../org/elasticsearch/cluster/ack/AckIT.java | 4 +- .../gateway/RecoveryFromGatewayIT.java | 4 +- .../index/IndexWithShadowReplicasIT.java | 4 +- .../index/mapper/DynamicMappingTests.java | 3 +- .../copyto/CopyToMapperIntegrationIT.java | 2 +- .../mapper/core/KeywordFieldMapperTests.java | 203 +++++++++++++ .../mapper/core/KeywordFieldTypeTests.java | 29 ++ .../ExternalValuesMapperIntegrationIT.java | 3 +- .../SimpleExternalMappingTests.java | 5 +- .../multifield/MultiFieldsIntegrationIT.java | 33 +-- .../object/SimpleObjectMappingTests.java | 3 +- .../mapping/SimpleGetFieldMappingsIT.java | 5 +- .../mapping/UpdateMappingIntegrationIT.java | 4 +- .../RandomExceptionCircuitBreakerIT.java | 3 +- .../indices/state/OpenCloseIndexIT.java | 3 +- .../template/IndexTemplateBlocksIT.java | 2 +- .../template/SimpleIndexTemplateIT.java | 14 +- .../percolator/PercolatorIT.java | 2 +- .../aggregations/bucket/ChildrenIT.java | 4 +- .../aggregations/bucket/GeoDistanceIT.java | 4 +- .../aggregations/bucket/GeoHashGridIT.java | 4 +- .../search/aggregations/bucket/NestedIT.java | 6 +- .../search/aggregations/bucket/SamplerIT.java | 6 +- .../aggregations/bucket/ShardSizeTermsIT.java | 10 +- .../bucket/SignificantTermsIT.java | 2 +- .../SignificantTermsSignificanceScoreIT.java | 2 +- .../metrics/AbstractGeoTestCase.java | 6 +- .../aggregations/metrics/TopHitsIT.java | 3 +- .../basic/SearchWithRandomExceptionsIT.java | 3 +- .../basic/SearchWithRandomIOExceptionsIT.java | 3 +- .../search/query/SearchQueryIT.java | 3 +- .../search/searchafter/SearchAfterIT.java | 2 +- .../search/sort/FieldSortIT.java | 21 +- .../messy/tests/EquivalenceTests.java | 8 +- .../messy/tests/SearchFieldsTests.java | 4 +- .../messy/tests/SimpleSortTests.java | 4 +- .../test/indices.put_mapping/10_basic.yaml | 5 +- 46 files changed, 624 insertions(+), 131 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java create mode 100644 core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java create mode 100644 core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldTypeTests.java diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java index f02f924bc39..78bdcb0f7f3 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java @@ -36,6 +36,7 @@ import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.core.BooleanFieldMapper; +import org.elasticsearch.index.mapper.core.KeywordFieldMapper; import org.elasticsearch.index.mapper.internal.IndexFieldMapper; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.shard.ShardId; @@ -94,6 +95,7 @@ public class 
IndexFieldDataService extends AbstractIndexComponent implements Clo static { Map buildersByTypeBuilder = new HashMap<>(); buildersByTypeBuilder.put("string", new PagedBytesIndexFieldData.Builder()); + buildersByTypeBuilder.put(KeywordFieldMapper.CONTENT_TYPE, MISSING_DOC_VALUES_BUILDER); buildersByTypeBuilder.put("float", MISSING_DOC_VALUES_BUILDER); buildersByTypeBuilder.put("double", MISSING_DOC_VALUES_BUILDER); buildersByTypeBuilder.put("byte", MISSING_DOC_VALUES_BUILDER); @@ -110,6 +112,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo docValuesBuildersByType = MapBuilder.newMapBuilder() .put("string", new DocValuesIndexFieldData.Builder()) + .put(KeywordFieldMapper.CONTENT_TYPE, new DocValuesIndexFieldData.Builder()) .put("float", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT)) .put("double", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.DOUBLE)) .put("byte", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BYTE)) @@ -126,6 +129,9 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo .put(Tuple.tuple("string", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder()) .put(Tuple.tuple("string", DISABLED_FORMAT), DISABLED_BUILDER) + .put(Tuple.tuple(KeywordFieldMapper.CONTENT_TYPE, DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder()) + .put(Tuple.tuple(KeywordFieldMapper.CONTENT_TYPE, DISABLED_FORMAT), DISABLED_BUILDER) + .put(Tuple.tuple("float", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT)) .put(Tuple.tuple("float", DISABLED_FORMAT), DISABLED_BUILDER) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java new file mode 100644 index 00000000000..35f1ee7ad58 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java @@ -0,0 +1,274 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper.core; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.internal.AllFieldMapper; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.index.mapper.core.TypeParsers.parseField; +import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; + +/** + * A field mapper for keywords. This mapper accepts strings and indexes them as-is. + */ +public final class KeywordFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll { + + public static final String CONTENT_TYPE = "keyword"; + + public static class Defaults { + public static final MappedFieldType FIELD_TYPE = new KeywordFieldType(); + + static { + FIELD_TYPE.setTokenized(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); + FIELD_TYPE.freeze(); + } + + public static final String NULL_VALUE = null; + public static final int IGNORE_ABOVE = Integer.MAX_VALUE; + } + + public static class Builder extends FieldMapper.Builder { + + protected String nullValue = Defaults.NULL_VALUE; + protected int ignoreAbove = Defaults.IGNORE_ABOVE; + + public Builder(String name) { + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); + builder = this; + } + + public Builder ignoreAbove(int ignoreAbove) { + if (ignoreAbove < 0) { + throw new IllegalArgumentException("[ignore_above] must be positive, got " + ignoreAbove); + } + this.ignoreAbove = ignoreAbove; + return this; + } + + @Override + public Builder indexOptions(IndexOptions indexOptions) { + if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) > 0) { + throw new IllegalArgumentException("The [keyword] field does not support positions, got [index_options]=" + + indexOptionToString(fieldType.indexOptions())); + } + return super.indexOptions(indexOptions); + } + + @Override + public KeywordFieldMapper build(BuilderContext context) { + setupFieldType(context); + KeywordFieldMapper fieldMapper = new KeywordFieldMapper( + name, fieldType, defaultFieldType, ignoreAbove, + context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); + return fieldMapper.includeInAll(includeInAll); + } + } + + public static class TypeParser implements Mapper.TypeParser { + @Override + public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + KeywordFieldMapper.Builder builder = new KeywordFieldMapper.Builder(name); + parseField(builder, name, node, parserContext); + for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { + Map.Entry entry = iterator.next(); + String propName = Strings.toUnderscoreCase(entry.getKey()); + Object propNode = entry.getValue(); + if 
(propName.equals("null_value")) { + if (propNode == null) { + throw new MapperParsingException("Property [null_value] cannot be null."); + } + builder.nullValue(propNode.toString()); + iterator.remove(); + } else if (propName.equals("ignore_above")) { + builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1)); + iterator.remove(); + } else if (parseMultiField(builder, name, parserContext, propName, propNode)) { + iterator.remove(); + } + } + return builder; + } + } + + public static final class KeywordFieldType extends MappedFieldType { + + public KeywordFieldType() {} + + protected KeywordFieldType(KeywordFieldType ref) { + super(ref); + } + + public KeywordFieldType clone() { + return new KeywordFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public String value(Object value) { + if (value == null) { + return null; + } + return value.toString(); + } + + @Override + public Query nullValueQuery() { + if (nullValue() == null) { + return null; + } + return termQuery(nullValue(), null); + } + } + + private Boolean includeInAll; + private int ignoreAbove; + + protected KeywordFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + int ignoreAbove, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); + assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0; + this.ignoreAbove = ignoreAbove; + } + + @Override + protected KeywordFieldMapper clone() { + return (KeywordFieldMapper) super.clone(); + } + + @Override + public KeywordFieldMapper includeInAll(Boolean includeInAll) { + if (includeInAll != null) { + KeywordFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; + } + } + + @Override + public KeywordFieldMapper includeInAllIfNotSet(Boolean includeInAll) { + if (includeInAll != null && this.includeInAll == null) { + KeywordFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; + } + } + + @Override + public KeywordFieldMapper unsetIncludeInAll() { + if (includeInAll != null) { + KeywordFieldMapper clone = clone(); + clone.includeInAll = null; + return clone; + } else { + return this; + } + } + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + final String value; + if (context.externalValueSet()) { + value = context.externalValue().toString(); + } else { + XContentParser parser = context.parser(); + if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { + value = fieldType().nullValueAsString(); + } else { + value = parser.textOrNull(); + } + } + + if (value == null || value.length() > ignoreAbove) { + return; + } + + if (context.includeInAll(includeInAll, this)) { + context.allEntries().addText(fieldType().name(), value, fieldType().boost()); + } + + if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { + Field field = new Field(fieldType().name(), value, fieldType()); + fields.add(field); + } + if (fieldType().hasDocValues()) { + fields.add(new SortedSetDocValuesField(fieldType().name(), new BytesRef(value))); + } + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); + this.includeInAll = ((KeywordFieldMapper) 
mergeWith).includeInAll; + this.ignoreAbove = ((KeywordFieldMapper) mergeWith).ignoreAbove; + } + + @Override + protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { + super.doXContentBody(builder, includeDefaults, params); + + if (includeDefaults || fieldType().nullValue() != null) { + builder.field("null_value", fieldType().nullValue()); + } + + if (includeInAll != null) { + builder.field("include_in_all", includeInAll); + } else if (includeDefaults) { + builder.field("include_in_all", true); + } + + if (includeDefaults || ignoreAbove != Defaults.IGNORE_ABOVE) { + builder.field("ignore_above", ignoreAbove); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index e5b0d0caaf2..66e754e5fda 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -42,7 +42,7 @@ import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.core.StringFieldMapper; +import org.elasticsearch.index.mapper.core.KeywordFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; @@ -131,15 +131,15 @@ public class ParentFieldMapper extends MetadataFieldMapper { @Override public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { - StringFieldMapper parentJoinField = createParentJoinFieldMapper(typeName, new BuilderContext(indexSettings, new ContentPath(0))); + KeywordFieldMapper parentJoinField = createParentJoinFieldMapper(typeName, new BuilderContext(indexSettings, new ContentPath(0))); MappedFieldType childJoinFieldType = Defaults.FIELD_TYPE.clone(); childJoinFieldType.setName(joinField(null)); return new ParentFieldMapper(parentJoinField, childJoinFieldType, null, indexSettings); } } - static StringFieldMapper createParentJoinFieldMapper(String docType, BuilderContext context) { - StringFieldMapper.Builder parentJoinField = new StringFieldMapper.Builder(joinField(docType)); + static KeywordFieldMapper createParentJoinFieldMapper(String docType, BuilderContext context) { + KeywordFieldMapper.Builder parentJoinField = new KeywordFieldMapper.Builder(joinField(docType)); parentJoinField.indexOptions(IndexOptions.NONE); parentJoinField.docValues(true); parentJoinField.fieldType().setDocValuesType(DocValuesType.SORTED); @@ -205,9 +205,9 @@ public class ParentFieldMapper extends MetadataFieldMapper { private final String parentType; // has no impact of field data settings, is just here for creating a join field, // the parent field mapper in the child type pointing to this type determines the field data settings for this join field - private final StringFieldMapper parentJoinField; + private final KeywordFieldMapper parentJoinField; - private ParentFieldMapper(StringFieldMapper parentJoinField, MappedFieldType childJoinFieldType, String parentType, Settings indexSettings) { + private ParentFieldMapper(KeywordFieldMapper parentJoinField, MappedFieldType childJoinFieldType, String parentType, Settings indexSettings) { super(NAME, childJoinFieldType, Defaults.FIELD_TYPE, indexSettings); this.parentType = parentType; 
this.parentJoinField = parentJoinField; diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java index 21082805f22..f44d454655e 100644 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java @@ -28,7 +28,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.core.StringFieldMapper; +import org.elasticsearch.index.mapper.core.KeywordFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; @@ -60,17 +60,16 @@ public class PercolatorFieldMapper extends FieldMapper { @Override public PercolatorFieldMapper build(BuilderContext context) { context.path().add(name); - StringFieldMapper extractedTermsField = createStringFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context); - StringFieldMapper unknownQueryField = createStringFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context); + KeywordFieldMapper extractedTermsField = createStringFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context); + KeywordFieldMapper unknownQueryField = createStringFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context); context.path().remove(); return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField, unknownQueryField); } - static StringFieldMapper.Builder createStringFieldBuilder(String name) { - StringFieldMapper.Builder queryMetaDataFieldBuilder = new StringFieldMapper.Builder(name); + static KeywordFieldMapper.Builder createStringFieldBuilder(String name) { + KeywordFieldMapper.Builder queryMetaDataFieldBuilder = new KeywordFieldMapper.Builder(name); queryMetaDataFieldBuilder.docValues(false); queryMetaDataFieldBuilder.store(false); - queryMetaDataFieldBuilder.tokenized(false); queryMetaDataFieldBuilder.indexOptions(IndexOptions.DOCS); return queryMetaDataFieldBuilder; } @@ -110,10 +109,10 @@ public class PercolatorFieldMapper extends FieldMapper { private final boolean mapUnmappedFieldAsString; private final QueryShardContext queryShardContext; - private final StringFieldMapper queryTermsField; - private final StringFieldMapper unknownQueryField; + private final KeywordFieldMapper queryTermsField; + private final KeywordFieldMapper unknownQueryField; - public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, StringFieldMapper queryTermsField, StringFieldMapper unknownQueryField) { + public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, KeywordFieldMapper queryTermsField, KeywordFieldMapper unknownQueryField) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); this.queryShardContext = queryShardContext; this.queryTermsField = queryTermsField; diff --git a/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java 
b/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index 271d5a353bc..e3885816cd2 100644 --- a/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -44,6 +44,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.core.KeywordFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.shard.IndexShard; @@ -158,7 +159,8 @@ public class TermVectorsService { private static boolean isValidField(MappedFieldType fieldType) { // must be a string - if (!(fieldType instanceof StringFieldMapper.StringFieldType)) { + if (fieldType instanceof StringFieldMapper.StringFieldType == false + && fieldType instanceof KeywordFieldMapper.KeywordFieldType == false) { return false; } // and must be indexed diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index b94ef19ec23..eab8faab8eb 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import org.elasticsearch.index.mapper.core.FloatFieldMapper; import org.elasticsearch.index.mapper.core.IntegerFieldMapper; +import org.elasticsearch.index.mapper.core.KeywordFieldMapper; import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.index.mapper.core.ShortFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper; @@ -96,6 +97,7 @@ public class IndicesModule extends AbstractModule { registerMapper(DateFieldMapper.CONTENT_TYPE, new DateFieldMapper.TypeParser()); registerMapper(IpFieldMapper.CONTENT_TYPE, new IpFieldMapper.TypeParser()); registerMapper(StringFieldMapper.CONTENT_TYPE, new StringFieldMapper.TypeParser()); + registerMapper(KeywordFieldMapper.CONTENT_TYPE, new KeywordFieldMapper.TypeParser()); registerMapper(TokenCountFieldMapper.CONTENT_TYPE, new TokenCountFieldMapper.TypeParser()); registerMapper(ObjectMapper.CONTENT_TYPE, new ObjectMapper.TypeParser()); registerMapper(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser()); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java index 1f18da58204..13e973504a3 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java @@ -50,10 +50,12 @@ public class IndicesStatsTests extends ESSingleNodeTestCase { .startObject("doc") .startObject("properties") .startObject("foo") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .field("doc_values", true) .field("store", true) + .endObject() + .startObject("bar") + .field("type", "string") .field("term_vector", "with_positions_offsets_payloads") .endObject() .endObject() @@ -61,7 +63,7 @@ public class IndicesStatsTests extends ESSingleNodeTestCase { .endObject(); 
assertAcked(client().admin().indices().prepareCreate("test").addMapping("doc", mapping)); ensureGreen("test"); - client().prepareIndex("test", "doc", "1").setSource("foo", "bar").get(); + client().prepareIndex("test", "doc", "1").setSource("foo", "bar", "bar", "baz").get(); client().admin().indices().prepareRefresh("test").get(); IndicesStatsResponse rsp = client().admin().indices().prepareStats("test").get(); @@ -73,7 +75,7 @@ public class IndicesStatsTests extends ESSingleNodeTestCase { assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0L)); // now check multiple segments stats are merged together - client().prepareIndex("test", "doc", "2").setSource("foo", "bar").get(); + client().prepareIndex("test", "doc", "2").setSource("foo", "bar", "bar", "baz").get(); client().admin().indices().prepareRefresh("test").get(); rsp = client().admin().indices().prepareStats("test").get(); diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index b788b9ba230..d4f61393010 100644 --- a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -153,7 +153,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { "field1", "type=string,index=no", // no tvs "field2", "type=string,index=no,store=true", // no tvs "field3", "type=string,index=no,term_vector=yes", // no tvs - "field4", "type=string,index=not_analyzed", // yes tvs + "field4", "type=keyword", // yes tvs "field5", "type=string,index=analyzed")); // yes tvs ensureYellow(); diff --git a/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 11d176b4566..550891d4906 100644 --- a/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -91,7 +91,7 @@ public class SimpleClusterStateIT extends ESIntegTestCase { .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field1").field("type", "string").field("store", true).endObject() - .startObject("field2").field("type", "string").field("store", true).field("index", "not_analyzed").endObject() + .startObject("field2").field("type", "keyword").field("store", true).endObject() .endObject().endObject().endObject()) .get(); diff --git a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index 2ec3b11a164..eb58e597c14 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java @@ -270,7 +270,7 @@ public class AckIT extends ESIntegTestCase { createIndex("test"); ensureGreen(); - assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed")); + assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=keyword")); for (Client client : clients()) { assertThat(getLocalClusterState(client).metaData().indices().get("test").mapping("test"), notNullValue()); @@ -281,7 +281,7 @@ public class AckIT extends ESIntegTestCase { createIndex("test"); ensureGreen(); - PutMappingResponse putMappingResponse = 
client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed").setTimeout("0s").get(); + PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=keyword").setTimeout("0s").get(); assertThat(putMappingResponse.isAcknowledged(), equalTo(false)); } diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index dd398bd48cd..fc8452ba81d 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -301,8 +301,8 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { .setTemplate("te*") .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("field1").field("type", "string").field("store", "yes").endObject() - .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject() + .startObject("field1").field("type", "string").field("store", true).endObject() + .startObject("field2").field("type", "keyword").field("store", true).endObject() .endObject().endObject().endObject()) .execute().actionGet(); client.admin().indices().prepareAliases().addAlias("test", "test_alias", QueryBuilders.termQuery("field", "value")).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java index 4ef513a9be3..d04f772ddba 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java +++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java @@ -643,7 +643,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) .build(); - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string,index=not_analyzed").get(); + prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=keyword").get(); ensureGreen(IDX); client().prepareIndex(IDX, "doc", "1").setSource("foo", "foo").get(); @@ -725,7 +725,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { .build(); // only one node, so all primaries will end up on node1 - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string,index=not_analyzed").get(); + prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=keyword").get(); ensureGreen(IDX); // Index some documents diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 6de49877ce1..1a8ffd9e6f4 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -473,8 +473,7 @@ public class DynamicMappingTests extends ESSingleNodeTestCase { .field("type", "string") .startObject("fields") .startObject("raw") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject() .endObject() .endObject() diff --git a/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationIT.java 
b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationIT.java index 4a010747624..8f1e61d8d5b 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationIT.java @@ -96,7 +96,7 @@ public class CopyToMapperIntegrationIT extends ESIntegTestCase { .startObject().startObject("template_raw") .field("match", "*_raw") .field("match_mapping_type", "string") - .startObject("mapping").field("type", "string").field("index", "not_analyzed").endObject() + .startObject("mapping").field("type", "keyword").endObject() .endObject().endObject() .startObject().startObject("template_all") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java new file mode 100644 index 00000000000..bdb3f9762ef --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java @@ -0,0 +1,203 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper.core; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.IndexableFieldType; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.DocumentMapperParser; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.Before; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class KeywordFieldMapperTests extends ESSingleNodeTestCase { + + IndexService indexService; + DocumentMapperParser parser; + + @Before + public void before() { + indexService = createIndex("test"); + parser = indexService.mapperService().documentMapperParser(); + } + + public void testDefaults() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", "1234") + .endObject() + .bytes()); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(2, fields.length); + + assertEquals("1234", fields[0].stringValue()); + IndexableFieldType fieldType = fields[0].fieldType(); + assertThat(fieldType.omitNorms(), equalTo(true)); + assertFalse(fieldType.tokenized()); + assertFalse(fieldType.stored()); + assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS)); + assertThat(fieldType.storeTermVectors(), equalTo(false)); + assertThat(fieldType.storeTermVectorOffsets(), equalTo(false)); + assertThat(fieldType.storeTermVectorPositions(), equalTo(false)); + assertThat(fieldType.storeTermVectorPayloads(), equalTo(false)); + assertEquals(DocValuesType.NONE, fieldType.docValuesType()); + + assertEquals(new BytesRef("1234"), fields[1].binaryValue()); + fieldType = fields[1].fieldType(); + assertThat(fieldType.indexOptions(), equalTo(IndexOptions.NONE)); + assertEquals(DocValuesType.SORTED_SET, fieldType.docValuesType()); + } + + public void testIgnoreAbove() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").field("ignore_above", 5).endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", "elk") + .endObject() + .bytes()); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(2, fields.length); + + doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", "elasticsearch") + .endObject() + .bytes()); + + fields = doc.rootDoc().getFields("field"); + assertEquals(0, fields.length); + } + + public 
void testNullValue() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").field("null_value", "uri").endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .endObject() + .bytes()); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(0, fields.length); + + doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .nullField("field") + .endObject() + .bytes()); + + fields = doc.rootDoc().getFields("field"); + assertEquals(2, fields.length); + assertEquals("uri", fields[0].stringValue()); + } + + public void testEnableStore() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").field("store", true).endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", "1234") + .endObject() + .bytes()); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(2, fields.length); + assertTrue(fields[0].fieldType().stored()); + } + + public void testDisableIndex() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").field("index", false).endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", "1234") + .endObject() + .bytes()); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.length); + assertEquals(IndexOptions.NONE, fields[0].fieldType().indexOptions()); + assertEquals(DocValuesType.SORTED_SET, fields[0].fieldType().docValuesType()); + } + + public void testDisableDocValues() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").field("doc_values", false).endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", "1234") + .endObject() + .bytes()); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.length); + assertEquals(DocValuesType.NONE, fields[0].fieldType().docValuesType()); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldTypeTests.java new file mode 100644 index 
00000000000..699717b5893 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldTypeTests.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.mapper.core; + +import org.elasticsearch.index.mapper.FieldTypeTestCase; +import org.elasticsearch.index.mapper.MappedFieldType; + +public class KeywordFieldTypeTests extends FieldTypeTestCase { + @Override + protected MappedFieldType createDefaultFieldType() { + return new KeywordFieldMapper.KeywordFieldType(); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java index f581f1f6a41..d90c39381f9 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java @@ -92,8 +92,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase { .field("store", true) .startObject("fields") .startObject("raw") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .field("store", true) .endObject() .endObject() diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java index bf92991e039..8b8955d19d7 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.core.KeywordFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.plugins.Plugin; @@ -106,6 +107,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { Map mapperParsers = new HashMap<>(); mapperParsers.put(ExternalMapperPlugin.EXTERNAL, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "foo")); mapperParsers.put(StringFieldMapper.CONTENT_TYPE, new StringFieldMapper.TypeParser()); + mapperParsers.put(KeywordFieldMapper.CONTENT_TYPE, new KeywordFieldMapper.TypeParser()); MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap()); DocumentMapperParser parser = new 
DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), @@ -121,8 +123,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { .field("store", true) .startObject("fields") .startObject("raw") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .field("store", true) .endObject() .endObject() diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java index 347e4dd9201..bb24a41c445 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java @@ -57,7 +57,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { Map titleFields = ((Map) XContentMapValues.extractValue("properties.title.fields", mappingSource)); assertThat(titleFields.size(), equalTo(1)); assertThat(titleFields.get("not_analyzed"), notNullValue()); - assertThat(((Map)titleFields.get("not_analyzed")).get("index").toString(), equalTo("not_analyzed")); + assertThat(((Map)titleFields.get("not_analyzed")).get("type").toString(), equalTo("keyword")); client().prepareIndex("my-index", "my-type", "1") .setSource("title", "Multi fields") @@ -86,7 +86,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { titleFields = ((Map) XContentMapValues.extractValue("properties.title.fields", mappingSource)); assertThat(titleFields.size(), equalTo(2)); assertThat(titleFields.get("not_analyzed"), notNullValue()); - assertThat(((Map)titleFields.get("not_analyzed")).get("index").toString(), equalTo("not_analyzed")); + assertThat(((Map)titleFields.get("not_analyzed")).get("type").toString(), equalTo("keyword")); assertThat(titleFields.get("uncased"), notNullValue()); assertThat(((Map)titleFields.get("uncased")).get("analyzer").toString(), equalTo("whitespace")); @@ -118,9 +118,8 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { assertThat(aField.get("fields"), notNullValue()); Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource)); - assertThat(bField.size(), equalTo(2)); - assertThat(bField.get("type").toString(), equalTo("string")); - assertThat(bField.get("index").toString(), equalTo("not_analyzed")); + assertThat(bField.size(), equalTo(1)); + assertThat(bField.get("type").toString(), equalTo("keyword")); GeoPoint point = new GeoPoint(51, 19); client().prepareIndex("my-index", "my-type", "1").setSource("a", point.toString()).setRefresh(true).get(); @@ -142,8 +141,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { .field("analyzer", "simple") .startObject("fields") .startObject("b") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject() .endObject() .endObject() @@ -161,9 +159,8 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { assertThat(aField.get("fields"), notNullValue()); Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource)); - assertThat(bField.size(), equalTo(2)); - assertThat(bField.get("type").toString(), equalTo("string")); - assertThat(bField.get("index").toString(), equalTo("not_analyzed")); + assertThat(bField.size(), equalTo(1)); + assertThat(bField.get("type").toString(), equalTo("keyword")); client().prepareIndex("my-index", "my-type", "1").setSource("a", "my 
tokens").setRefresh(true).get(); SearchResponse countResponse = client().prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "my tokens")).get(); @@ -186,9 +183,8 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { assertThat(aField.get("fields"), notNullValue()); Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource)); - assertThat(bField.size(), equalTo(2)); - assertThat(bField.get("type").toString(), equalTo("string")); - assertThat(bField.get("index").toString(), equalTo("not_analyzed")); + assertThat(bField.size(), equalTo(1)); + assertThat(bField.get("type").toString(), equalTo("keyword")); client().prepareIndex("my-index", "my-type", "1").setSource("a", "complete me").setRefresh(true).get(); SearchResponse countResponse = client().prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "complete me")).get(); @@ -211,9 +207,8 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { assertThat(aField.get("fields"), notNullValue()); Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource)); - assertThat(bField.size(), equalTo(2)); - assertThat(bField.get("type").toString(), equalTo("string")); - assertThat(bField.get("index").toString(), equalTo("not_analyzed")); + assertThat(bField.size(), equalTo(1)); + assertThat(bField.get("type").toString(), equalTo("keyword")); client().prepareIndex("my-index", "my-type", "1").setSource("a", "127.0.0.1").setRefresh(true).get(); SearchResponse countResponse = client().prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "127.0.0.1")).get(); @@ -227,8 +222,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { .field("type", fieldType) .startObject("fields") .startObject("b") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject() .endObject() .endObject() @@ -243,8 +237,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { .field("type", "string") .startObject("fields") .startObject("not_analyzed") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject() .endObject() .endObject() diff --git a/core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java index 885e038de60..423c3e04343 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java @@ -152,8 +152,7 @@ public class SimpleObjectMappingTests extends ESSingleNodeTestCase { .field("index", "analyzed") .startObject("fields") .startObject("raw") - .field("type", "string") - .field("index","not_analyzed") + .field("type", "keyword") .endObject() .endObject() .endObject() diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java index a993130a91a..070772356a6 100644 --- a/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java @@ -60,7 +60,7 @@ public class SimpleGetFieldMappingsIT extends ESIntegTestCase { private XContentBuilder getMappingForType(String type) throws IOException { return jsonBuilder().startObject().startObject(type).startObject("properties") 
.startObject("field1").field("type", "string").endObject() - .startObject("obj").startObject("properties").startObject("subfield").field("type", "string").field("index", "not_analyzed").endObject().endObject().endObject() + .startObject("obj").startObject("properties").startObject("subfield").field("type", "keyword").endObject().endObject().endObject() .endObject().endObject().endObject(); } @@ -147,8 +147,7 @@ public class SimpleGetFieldMappingsIT extends ESIntegTestCase { assertThat((Map) response.fieldMappings("test", "type", "num").sourceAsMap().get("num"), hasEntry("type", (Object) "long")); assertThat((Map) response.fieldMappings("test", "type", "field1").sourceAsMap().get("field1"), hasEntry("index", (Object) "analyzed")); assertThat((Map) response.fieldMappings("test", "type", "field1").sourceAsMap().get("field1"), hasEntry("type", (Object) "string")); - assertThat((Map) response.fieldMappings("test", "type", "obj.subfield").sourceAsMap().get("subfield"), hasEntry("index", (Object) "not_analyzed")); - assertThat((Map) response.fieldMappings("test", "type", "obj.subfield").sourceAsMap().get("subfield"), hasEntry("type", (Object) "string")); + assertThat((Map) response.fieldMappings("test", "type", "obj.subfield").sourceAsMap().get("subfield"), hasEntry("type", (Object) "keyword")); } diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 144587a1833..3c4dc3186bc 100644 --- a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -229,7 +229,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase { logger.info("Changing _default_ mappings field from analyzed to non-analyzed"); putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource( JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) - .startObject("properties").startObject("f").field("type", "string").field("index", "not_analyzed").endObject().endObject() + .startObject("properties").startObject("f").field("type", "keyword").endObject().endObject() .endObject().endObject() ).get(); assertThat(putResponse.isAcknowledged(), equalTo(true)); @@ -238,7 +238,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase { getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get(); defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap(); Map fieldSettings = (Map) ((Map) defaultMapping.get("properties")).get("f"); - assertThat(fieldSettings, hasEntry("index", (Object) "not_analyzed")); + assertThat(fieldSettings, hasEntry("type", (Object) "keyword")); // but we still validate the _default_ type logger.info("Confirming _default_ mappings validation"); diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index a406106f110..35ed7a2c657 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -75,8 +75,7 @@ public class RandomExceptionCircuitBreakerIT extends 
ESIntegTestCase { .startObject("type") .startObject("properties") .startObject("test-str") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .field("doc_values", randomBoolean()) .endObject() // test-str .startObject("test-num") diff --git a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index e17b2a5c7b4..8eef10d693b 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -332,8 +332,7 @@ public class OpenCloseIndexIT extends ESIntegTestCase { startObject("type"). startObject("properties"). startObject("test") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject(). endObject(). endObject() diff --git a/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java b/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java index 11e2d7d2ac4..38269ebf8f1 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java @@ -39,7 +39,7 @@ public class IndexTemplateBlocksIT extends ESIntegTestCase { .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field1").field("type", "string").field("store", true).endObject() - .startObject("field2").field("type", "string").field("store", true).field("index", "not_analyzed").endObject() + .startObject("field2").field("type", "keyword").field("store", true).endObject() .endObject().endObject().endObject()) .execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index fbfaa93df8c..5df1b2311ca 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -79,7 +79,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field1").field("type", "string").field("store", true).endObject() - .startObject("field2").field("type", "string").field("store", true).field("index", "not_analyzed").endObject() + .startObject("field2").field("type", "keyword").field("store", true).endObject() .endObject().endObject().endObject()) .get(); @@ -146,7 +146,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field1").field("type", "string").field("store", true).endObject() - .startObject("field2").field("type", "string").field("store", true).field("index", "not_analyzed").endObject() + .startObject("field2").field("type", "string").field("store", true).endObject() .endObject().endObject().endObject()) .execute().actionGet(); @@ -171,7 +171,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field1").field("type", "string").field("store", 
true).endObject() - .startObject("field2").field("type", "string").field("store", true).field("index", "not_analyzed").endObject() + .startObject("field2").field("type", "keyword").field("store", true).endObject() .endObject().endObject().endObject()) .execute().actionGet(); @@ -191,7 +191,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field1").field("type", "string").field("store", true).endObject() - .startObject("field2").field("type", "string").field("store", true).field("index", "not_analyzed").endObject() + .startObject("field2").field("type", "keyword").field("store", true).endObject() .endObject().endObject().endObject()) .execute().actionGet(); @@ -214,7 +214,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field1").field("type", "string").field("store", true).endObject() - .startObject("field2").field("type", "string").field("store", true).field("index", "not_analyzed").endObject() + .startObject("field2").field("type", "keyword").field("store", true).endObject() .endObject().endObject().endObject()) .execute().actionGet(); @@ -224,7 +224,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field1").field("type", "string").field("store", true).endObject() - .startObject("field2").field("type", "string").field("store", true).field("index", "not_analyzed").endObject() + .startObject("field2").field("type", "keyword").field("store", true).endObject() .endObject().endObject().endObject()) .execute().actionGet(); @@ -234,7 +234,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field1").field("type", "string").field("store", true).endObject() - .startObject("field2").field("type", "string").field("store", true).field("index", "not_analyzed").endObject() + .startObject("field2").field("type", "keyword").field("store", true).endObject() .endObject().endObject().endObject()) .execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java index be87cc0c9a8..6d187147f2b 100644 --- a/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java @@ -861,7 +861,7 @@ public class PercolatorIT extends ESIntegTestCase { public void testPercolateWithAliasFilter() throws Exception { assertAcked(prepareCreate("my-index") - .addMapping(PercolatorService.TYPE_NAME, "a", "type=string,index=not_analyzed") + .addMapping(PercolatorService.TYPE_NAME, "a", "type=keyword") .addAlias(new Alias("a").filter(QueryBuilders.termQuery("a", "a"))) .addAlias(new Alias("b").filter(QueryBuilders.termQuery("a", "b"))) .addAlias(new Alias("c").filter(QueryBuilders.termQuery("a", "c"))) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java index 2dcb79b8d50..8981a96facf 100644 --- 
a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java @@ -402,8 +402,8 @@ public class ChildrenIT extends ESIntegTestCase { assertAcked( prepareCreate("index") - .addMapping("parentType", "name", "type=string,index=not_analyzed", "town", "type=string,index=not_analyzed") - .addMapping("childType", "_parent", "type=parentType", "name", "type=string,index=not_analyzed", "age", "type=integer") + .addMapping("parentType", "name", "type=keyword", "town", "type=keyword") + .addMapping("childType", "_parent", "type=parentType", "name", "type=keyword", "age", "type=integer") ); List requests = new ArrayList<>(); requests.add(client().prepareIndex("index", "parentType", "1").setSource("name", "Bob", "town", "Memphis")); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 5d52d1442f7..b611f52ac4f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -83,11 +83,11 @@ public class GeoDistanceIT extends ESIntegTestCase { public void setupSuiteScopeCluster() throws Exception { Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); prepareCreate("idx").setSettings(settings) - .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed") + .addMapping("type", "location", "type=geo_point", "city", "type=keyword") .execute().actionGet(); prepareCreate("idx-multi") - .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed") + .addMapping("type", "location", "type=geo_point", "city", "type=keyword") .execute().actionGet(); createIndex("idx_unmapped"); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index 672447a6811..7f3f51031d7 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -90,7 +90,7 @@ public class GeoHashGridIT extends ESIntegTestCase { Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("idx").setSettings(settings) - .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed")); + .addMapping("type", "location", "type=geo_point", "city", "type=keyword")); List cities = new ArrayList<>(); Random random = getRandom(); @@ -115,7 +115,7 @@ public class GeoHashGridIT extends ESIntegTestCase { indexRandom(true, cities); assertAcked(prepareCreate("multi_valued_idx").setSettings(settings) - .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed")); + .addMapping("type", "location", "type=geo_point", "city", "type=keyword")); cities = new ArrayList<>(); multiValuedExpectedDocCountsForGeoHash = new ObjectIntHashMap<>(numDocs * 2); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index 5317f2e15f1..af17ca8e212 100644 --- 
a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -367,12 +367,12 @@ public class NestedIT extends ESIntegTestCase { .field("type", "nested") .startObject("properties") .startObject("cid").field("type", "long").endObject() - .startObject("identifier").field("type", "string").field("index", "not_analyzed").endObject() + .startObject("identifier").field("type", "keyword").endObject() .startObject("tags") .field("type", "nested") .startObject("properties") .startObject("tid").field("type", "long").endObject() - .startObject("name").field("type", "string").field("index", "not_analyzed").endObject() + .startObject("name").field("type", "keyword").endObject() .endObject() .endObject() .endObject() @@ -386,7 +386,7 @@ public class NestedIT extends ESIntegTestCase { .startObject("properties") .startObject("end").field("type", "date").field("format", "dateOptionalTime").endObject() .startObject("start").field("type", "date").field("format", "dateOptionalTime").endObject() - .startObject("label").field("type", "string").field("index", "not_analyzed").endObject() + .startObject("label").field("type", "keyword").endObject() .endObject() .endObject() .endObject() diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java index 623d27b2cc7..b6088c88784 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java @@ -61,12 +61,12 @@ public class SamplerIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS, SETTING_NUMBER_OF_REPLICAS, 0).addMapping( - "book", "author", "type=string,index=not_analyzed", "name", "type=string,index=analyzed", "genre", - "type=string,index=not_analyzed", "price", "type=float")); + "book", "author", "type=keyword", "name", "type=string,index=analyzed", "genre", + "type=keyword", "price", "type=float")); createIndex("idx_unmapped"); // idx_unmapped_author is same as main index but missing author field assertAcked(prepareCreate("idx_unmapped_author").setSettings(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS, SETTING_NUMBER_OF_REPLICAS, 0) - .addMapping("book", "name", "type=string,index=analyzed", "genre", "type=string,index=not_analyzed", "price", "type=float")); + .addMapping("book", "name", "type=string,index=analyzed", "genre", "type=keyword", "price", "type=float")); ensureGreen(); String data[] = { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java index 0616fa01b17..ecc16b85f13 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -32,7 +32,7 @@ import static org.hamcrest.Matchers.equalTo; public class ShardSizeTermsIT extends ShardSizeTestCase { public void testNoShardSizeString() throws Exception { - createIdx("type=string,index=not_analyzed"); + createIdx("type=keyword"); indexData(); @@ -55,7 +55,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { } public void testShardSizeEqualsSizeString() throws Exception { - 
createIdx("type=string,index=not_analyzed"); + createIdx("type=keyword"); indexData(); @@ -79,7 +79,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { public void testWithShardSizeString() throws Exception { - createIdx("type=string,index=not_analyzed"); + createIdx("type=keyword"); indexData(); @@ -103,7 +103,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { public void testWithShardSizeStringSingleShard() throws Exception { - createIdx("type=string,index=not_analyzed"); + createIdx("type=keyword"); indexData(); @@ -126,7 +126,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { } public void testNoShardSizeTermOrderString() throws Exception { - createIdx("type=string,index=not_analyzed"); + createIdx("type=keyword"); indexData(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsIT.java index 97a3cfa3ba2..450e029622f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsIT.java @@ -75,7 +75,7 @@ public class SignificantTermsIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 5, SETTING_NUMBER_OF_REPLICAS, 0).addMapping("fact", - "_routing", "required=true", "routing_id", "type=string,index=not_analyzed", "fact_category", + "_routing", "required=true", "routing_id", "type=keyword", "fact_category", "type=integer,index=true", "description", "type=string,index=analyzed")); createIndex("idx_unmapped"); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 6c1e7dfcabf..6daa6f2dc28 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -282,7 +282,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { public void testDeletesIssue7951() throws Exception { String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}"; - String mappings = "{\"doc\": {\"properties\":{\"text\": {\"type\":\"string\",\"index\":\"not_analyzed\"}}}}"; + String mappings = "{\"doc\": {\"properties\":{\"text\": {\"type\":\"keyword\"}}}}"; assertAcked(prepareCreate(INDEX_NAME).setSettings(settings).addMapping("doc", mappings)); String[] cat1v1 = {"constant", "one"}; String[] cat1v2 = {"constant", "uno"}; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java index 695fb87efa9..cc0e8b2050e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -75,7 +75,7 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase { createIndex(UNMAPPED_IDX_NAME); assertAcked(prepareCreate(IDX_NAME) .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point,geohash_prefix=true,geohash_precision=12", - MULTI_VALUED_FIELD_NAME, "type=geo_point", 
NUMBER_FIELD_NAME, "type=long", "tag", "type=string,index=not_analyzed")); + MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long", "tag", "type=keyword")); singleTopLeft = new GeoPoint(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY); singleBottomRight = new GeoPoint(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY); @@ -136,7 +136,7 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase { assertAcked(prepareCreate(EMPTY_IDX_NAME).addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point")); assertAcked(prepareCreate(DATELINE_IDX_NAME) - .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long", "tag", "type=string,index=not_analyzed")); + .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long", "tag", "type=keyword")); GeoPoint[] geoValues = new GeoPoint[5]; geoValues[0] = new GeoPoint(38, 178); @@ -154,7 +154,7 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase { .endObject())); } assertAcked(prepareCreate(HIGH_CARD_IDX_NAME).setSettings(Settings.builder().put("number_of_shards", 2)) - .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long,store=true", "tag", "type=string,index=not_analyzed")); + .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long,store=true", "tag", "type=keyword")); for (int i = 0; i < 2000; i++) { singleVal = singleValues[i % numUniqueGeoPoints]; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 90a52ed11b8..3d8259a6ed6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -120,8 +120,7 @@ public class TopHitsIT extends ESIntegTestCase { .field("type", "nested") .startObject("properties") .startObject("name") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject() .endObject() .endObject() diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java index c45b04f7825..d342402e4bf 100644 --- a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java +++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java @@ -63,8 +63,7 @@ public class SearchWithRandomExceptionsIT extends ESIntegTestCase { startObject("type"). startObject("properties"). startObject("test") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject(). endObject(). endObject() diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index a6b76359cff..36e3a6076d5 100644 --- a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -57,8 +57,7 @@ public class SearchWithRandomIOExceptionsIT extends ESIntegTestCase { startObject("type"). 
startObject("properties"). startObject("test") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject(). endObject(). endObject() diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index faa6b62d2ad..fdce6f6b424 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -1524,8 +1524,7 @@ public class SearchQueryIT extends ESIntegTestCase { .field("format", "epoch_millis") .endObject() .startObject("bs") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject() .endObject() .endObject() diff --git a/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterIT.java b/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterIT.java index e4ac3b728ea..0274143a6ad 100644 --- a/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterIT.java +++ b/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterIT.java @@ -295,7 +295,7 @@ public class SearchAfterIT extends ESIntegTestCase { mappings.add("type=boolean"); } else if (types.get(i) instanceof Text) { mappings.add("field" + Integer.toString(i)); - mappings.add("type=string,index=not_analyzed"); + mappings.add("type=keyword"); } else { fail("Can't match type [" + type + "]"); } diff --git a/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java b/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java index 2b3093563a3..cdceb9e9951 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -187,8 +187,7 @@ public class FieldSortIT extends ESIntegTestCase { "{\"$type\": " + " {\"properties\": " + " {\"grantee\": " - + " {\"index\": " - + " \"not_analyzed\", " + + " { \"index\": \"not_analyzed\", " + " \"term_vector\": \"with_positions_offsets\", " + " \"type\": \"string\", " + " \"analyzer\": \"snowball\", " @@ -265,12 +264,10 @@ public class FieldSortIT extends ESIntegTestCase { .startObject("type") .startObject("properties") .startObject("sparse_bytes") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject() .startObject("dense_bytes") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject() .endObject() .endObject() @@ -518,7 +515,7 @@ public class FieldSortIT extends ESIntegTestCase { assertAcked(prepareCreate("test") .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("str_value") - .field("type", "string").field("index", "not_analyzed").startObject("fielddata") + .field("type", "keyword").startObject("fielddata") .field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject().startObject("boolean_value") .field("type", "boolean").endObject().startObject("byte_value").field("type", "byte").startObject("fielddata") .field("format", random().nextBoolean() ? 
"doc_values" : null).endObject().endObject().startObject("short_value") @@ -826,8 +823,7 @@ public class FieldSortIT extends ESIntegTestCase { .startObject("type1") .startObject("properties") .startObject("value") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject() .endObject() .endObject() @@ -950,7 +946,7 @@ public class FieldSortIT extends ESIntegTestCase { .field("type", "float").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null) .endObject().endObject().startObject("double_values").field("type", "double").startObject("fielddata") .field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject().startObject("string_values") - .field("type", "string").field("index", "not_analyzed").startObject("fielddata") + .field("type", "keyword").startObject("fielddata") .field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject().endObject().endObject() .endObject())); ensureGreen(); @@ -1259,7 +1255,7 @@ public class FieldSortIT extends ESIntegTestCase { public void testSortOnRareField() throws IOException { assertAcked(prepareCreate("test").addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("string_values") - .field("type", "string").field("index", "not_analyzed").startObject("fielddata") + .field("type", "keyword").startObject("fielddata") .field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject().endObject().endObject() .endObject())); ensureGreen(); @@ -1437,8 +1433,7 @@ public class FieldSortIT extends ESIntegTestCase { .field("type", "string") .startObject("fields") .startObject("sub") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .endObject() .endObject() .endObject() diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java index 3c062f871d3..e9c2969b07c 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java @@ -188,13 +188,11 @@ public class EquivalenceTests extends ESIntegTestCase { .startObject("type") .startObject("properties") .startObject("string_values") - .field("type", "string") - .field("index", "not_analyzed") + .field("type", "keyword") .startObject("fields") .startObject("doc_values") - .field("type", "string") - .field("index", "no") - .field("doc_values", true) + .field("type", "keyword") + .field("index", false) .endObject() .endObject() .endObject() diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java index f38b4e36e4e..1b58bde91c8 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java @@ -614,10 +614,10 @@ public class SearchFieldsTests extends ESIntegTestCase { public void testScriptFields() throws Exception { assertAcked(prepareCreate("index").addMapping("type", - "s", "type=string,index=not_analyzed", + "s", "type=keyword", "l", "type=long", "d", "type=double", - "ms", "type=string,index=not_analyzed", + "ms", "type=keyword", "ml", "type=long", "md", 
"type=double").get()); final int numDocs = randomIntBetween(3, 8); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java index 096e30f40da..48570de1a81 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java @@ -65,7 +65,7 @@ public class SimpleSortTests extends ESIntegTestCase { Random random = random(); assertAcked(prepareCreate("test") .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("str_value").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject() + .startObject("str_value").field("type", "keyword").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject() .startObject("boolean_value").field("type", "boolean").endObject() .startObject("byte_value").field("type", "byte").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject() .startObject("short_value").field("type", "short").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject() @@ -226,7 +226,7 @@ public class SimpleSortTests extends ESIntegTestCase { // We have to specify mapping explicitly because by the time search is performed dynamic mapping might not // be propagated to all nodes yet and sort operation fail when the sort field is not defined String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("svalue").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject() + .startObject("svalue").field("type", "keyword").startObject("fielddata").field("format", random().nextBoolean() ? 
"doc_values" : null).endObject().endObject() .endObject().endObject().endObject().string(); assertAcked(prepareCreate("test").addMapping("type1", mapping)); ensureGreen(); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yaml index efdcf15cf89..2a63411937e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yaml @@ -39,8 +39,7 @@ analyzer: whitespace fields: text_raw: - type: string - index: not_analyzed + type: keyword - do: @@ -48,4 +47,4 @@ index: test_index - match: {test_index.mappings.test_type.properties.text1.type: string} - - match: {test_index.mappings.test_type.properties.text1.fields.text_raw.index: not_analyzed} + - match: {test_index.mappings.test_type.properties.text1.fields.text_raw.type: keyword} From 8045e51ff3f29f175548fe1157a49e4525f809eb Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 11 Feb 2016 18:25:45 +0100 Subject: [PATCH 15/22] Corrected typos and removed unused import --- .../java/org/elasticsearch/common/settings/SettingsModule.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 23c67609f1b..973b9597ad7 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.settings; import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.tribe.TribeService; import java.util.HashMap; @@ -90,7 +89,7 @@ public class SettingsModule extends AbstractModule { /** * Registers a settings filter pattern that allows to filter out certain settings that for instance contain sensitive information - * or if a setting is for internal purposes only. The given patter must either be a valid settings key or a simple regesp pattern. + * or if a setting is for internal purposes only. The given pattern must either be a valid settings key or a simple regexp pattern. 
*/ public void registerSettingsFilter(String filter) { if (SettingsFilter.isValidPattern(filter) == false) { From 37ff6af759c538d4caec86d8031fd153dc91272c Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 11 Feb 2016 18:26:15 +0100 Subject: [PATCH 16/22] environemnt -> environment --- .../index/analysis/AnalysisRegistry.java | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 1fd3a4d96b0..a8a7b4fe004 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -57,7 +57,7 @@ public final class AnalysisRegistry implements Closeable { private final Map cachedAnalyzer = new ConcurrentHashMap<>(); private final PrebuiltAnalysis prebuiltAnalysis; private final HunspellService hunspellService; - private final Environment environemnt; + private final Environment environment; public AnalysisRegistry(HunspellService hunspellService, Environment environment) { this(hunspellService, environment, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); @@ -70,7 +70,7 @@ public final class AnalysisRegistry implements Closeable { Map> analyzers) { prebuiltAnalysis = new PrebuiltAnalysis(); this.hunspellService = hunspellService; - this.environemnt = environment; + this.environment = environment; final Map> charFilterBuilder = new HashMap<>(charFilters); final Map> tokenFilterBuilder = new HashMap<>(tokenFilters); final Map> tokenizerBuilder = new HashMap<>(tokenizers); @@ -115,13 +115,13 @@ public final class AnalysisRegistry implements Closeable { AnalysisModule.AnalysisProvider provider = analyzers.get(analyzer); return provider == null ? 
null : cachedAnalyzer.computeIfAbsent(analyzer, (key) -> { try { - return provider.get(environemnt, key).get(); + return provider.get(environment, key).get(); } catch (IOException ex) { throw new ElasticsearchException("failed to load analyzer for name " + key, ex); }} ); } - return analyzerProvider.get(environemnt, analyzer).get(); + return analyzerProvider.get(environment, analyzer).get(); } @Override @@ -324,7 +324,7 @@ public final class AnalysisRegistry implements Closeable { if (type == null) { throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]"); } - factory = type.get(settings, environemnt, name, currentSettings); + factory = type.get(settings, environment, name, currentSettings); } factories.put(name, factory); } else { @@ -335,7 +335,7 @@ public final class AnalysisRegistry implements Closeable { if (type == null) { throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]"); } - final T factory = type.get(settings, environemnt, name, currentSettings); + final T factory = type.get(settings, environment, name, currentSettings); factories.put(name, factory); } @@ -355,9 +355,9 @@ public final class AnalysisRegistry implements Closeable { AnalysisModule.AnalysisProvider defaultProvider = defaultInstance.get(name); final T instance; if (defaultProvider == null) { - instance = provider.get(settings, environemnt, name, defaultSettings); + instance = provider.get(settings, environment, name, defaultSettings); } else { - instance = defaultProvider.get(settings, environemnt, name, defaultSettings); + instance = defaultProvider.get(settings, environment, name, defaultSettings); } factories.put(name, instance); String camelCase = Strings.toCamelCase(name); @@ -371,7 +371,7 @@ public final class AnalysisRegistry implements Closeable { final AnalysisModule.AnalysisProvider provider = entry.getValue(); final String camelCase = Strings.toCamelCase(name); if (factories.containsKey(name) == false || (defaultInstance.containsKey(camelCase) == false && factories.containsKey(camelCase) == false)) { - final T instance = provider.get(settings, environemnt, name, defaultSettings); + final T instance = provider.get(settings, environment, name, defaultSettings); if (factories.containsKey(name) == false) { factories.put(name, instance); } From 7a969c0bbe6281759ce3f880d3da82a9885db697 Mon Sep 17 00:00:00 2001 From: gmarz Date: Tue, 9 Feb 2016 14:29:16 -0500 Subject: [PATCH 17/22] Windows service: Use JAVA_HOME environment variable in registry This allows for updating Java without having to re-install the service. 
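A minimal sketch of the idea (an editorial illustration, not part of the patch): escaping the variable as %%JAVA_HOME%% makes procrun store the literal text %JAVA_HOME% in the service configuration kept in the registry, so the jvm.dll path is resolved from the environment each time the service starts instead of being fixed at install time.

rem Sketch only, assuming the EXECUTABLE/SERVICE_ID variables set earlier in service.bat.
rem JVM_DLL now holds only the suffix below JAVA_HOME:
set JVM_DLL=\jre\bin\server\jvm.dll
rem %%JAVA_HOME%% escapes to the literal "%JAVA_HOME%" in the stored value and is
rem expanded against the current environment when the service starts:
"%EXECUTABLE%" //IS//%SERVICE_ID% --Jvm "%%JAVA_HOME%%%JVM_DLL%" --StartMode jvm --StopMode jvm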
Closes #13521 --- .../src/main/resources/bin/service.bat | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/distribution/src/main/resources/bin/service.bat b/distribution/src/main/resources/bin/service.bat index f423bb9740f..22242e36ff9 100644 --- a/distribution/src/main/resources/bin/service.bat +++ b/distribution/src/main/resources/bin/service.bat @@ -110,21 +110,24 @@ echo Installing service : "%SERVICE_ID%" echo Using JAVA_HOME (%ARCH%): "%JAVA_HOME%" rem Check JVM server dll first -set JVM_DLL=%JAVA_HOME%\jre\bin\server\jvm.dll -if exist "%JVM_DLL%" goto foundJVM +if exist "%JAVA_HOME%"\jre\bin\server\jvm.dll ( + set JVM_DLL=\jre\bin\server\jvm.dll + goto foundJVM +) rem Check 'server' JRE (JRE installed on Windows Server) -set JVM_DLL=%JAVA_HOME%\bin\server\jvm.dll -if exist "%JVM_DLL%" goto foundJVM +if exist "%JAVA_HOME%"\bin\server\jvm.dll ( + set JVM_DLL=\bin\server\jvm.dll + goto foundJVM +) rem Fallback to 'client' JRE -set JVM_DLL=%JAVA_HOME%\bin\client\jvm.dll - -if exist "%JVM_DLL%" ( -echo Warning: JAVA_HOME points to a JRE and not JDK installation; a client (not a server^) JVM will be used... +if exist "%JAVA_HOME%"\bin\client\jvm.dll ( + set JVM_DLL=\bin\client\jvm.dll + echo Warning: JAVA_HOME points to a JRE and not JDK installation; a client (not a server^) JVM will be used... ) else ( -echo JAVA_HOME points to an invalid Java installation (no jvm.dll found in "%JAVA_HOME%"^). Existing... -goto:eof + echo JAVA_HOME points to an invalid Java installation (no jvm.dll found in "%JAVA_HOME%"^). Exiting... + goto:eof ) :foundJVM @@ -159,7 +162,7 @@ if not "%ES_JAVA_OPTS%" == "" set JVM_OPTS=%JVM_OPTS%;%JVM_ES_JAVA_OPTS% if "%ES_START_TYPE%" == "" set ES_START_TYPE=manual if "%ES_STOP_TIMEOUT%" == "" set ES_STOP_TIMEOUT=0 -"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StopClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmSs %JVM_SS% --JvmMs %JVM_XMS% --JvmMx %JVM_XMX% --JvmOptions %JVM_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "Elasticsearch %ES_VERSION% (%SERVICE_ID%)" --Description "Elasticsearch %ES_VERSION% Windows Service - http://elasticsearch.org" --Jvm "%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%" ++StartParams start +"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StopClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmSs %JVM_SS% --JvmMs %JVM_XMS% --JvmMx %JVM_XMX% --JvmOptions %JVM_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "Elasticsearch %ES_VERSION% (%SERVICE_ID%)" --Description "Elasticsearch %ES_VERSION% Windows Service - http://elasticsearch.org" --Jvm "%%JAVA_HOME%%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%" ++StartParams start if not errorlevel 1 goto installed From 4a96f4f31078e91be5846434896722d45ea7ac23 Mon Sep 17 00:00:00 2001 From: gmarz Date: Thu, 11 Feb 2016 13:09:08 -0500 Subject: [PATCH 18/22] Fix failing stylecheck --- .../java/org/elasticsearch/search/aggregations/MetaDataIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java 
b/core/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java index 43ae33d6b2e..dc17e446055 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/MetaDataIT.java @@ -31,7 +31,8 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.search.aggregations.AggregationBuilders.*; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.maxBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; From 52ee4c70275a6b54ac193f5a13e185355dc962bc Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Wed, 10 Feb 2016 15:29:34 -0600 Subject: [PATCH 19/22] upgrade to lucene 5.5.0-snapshot-850c6c2 --- buildSrc/version.properties | 2 +- .../elasticsearch/common/geo/GeoPoint.java | 19 ++++---- .../elasticsearch/common/geo/GeoUtils.java | 5 +- .../plain/AbstractIndexGeoPointFieldData.java | 4 +- .../plain/GeoPointArrayIndexFieldData.java | 19 ++++---- .../mapper/geo/BaseGeoPointFieldMapper.java | 4 +- .../index/mapper/geo/GeoPointFieldMapper.java | 13 ++++-- .../query/GeoBoundingBoxQueryBuilder.java | 18 +++++--- .../index/query/GeoDistanceQueryBuilder.java | 12 +++-- .../query/GeoDistanceRangeQueryBuilder.java | 23 ++++++---- .../index/query/GeoPolygonQueryBuilder.java | 12 +++-- .../index/query/GeohashCellQuery.java | 2 +- .../bucket/geogrid/GeoHashGridParser.java | 2 +- .../bucket/geogrid/InternalGeoHashGrid.java | 2 +- .../geocentroid/GeoCentroidAggregator.java | 4 +- .../geocentroid/InternalGeoCentroid.java | 4 +- .../support/format/ValueFormatter.java | 2 +- .../completion/context/GeoContextMapping.java | 18 ++++---- .../elasticsearch/bootstrap/security.policy | 2 +- .../bootstrap/test-framework.policy | 2 +- .../common/geo/GeoHashTests.java | 8 ++-- .../AbstractGeoFieldDataTestCase.java | 26 +++++++++-- .../SimpleExternalMappingTests.java | 8 ++-- .../mapper/geo/GeoPointFieldMapperTests.java | 46 +++++++++---------- .../geo/GeohashMappingGeoPointTests.java | 12 ++--- .../mapper/multifield/MultiFieldTests.java | 8 ---- .../GeoBoundingBoxQueryBuilderTests.java | 2 +- .../query/GeoDistanceQueryBuilderTests.java | 6 +-- .../query/GeoDistanceRangeQueryTests.java | 4 +- .../query/GeoPolygonQueryBuilderTests.java | 2 +- .../search/geo/GeoPointParsingTests.java | 11 ++--- .../index/search/geo/GeoUtilsTests.java | 2 +- .../aggregations/bucket/GeoHashGridIT.java | 29 ++++++------ .../aggregations/bucket/ShardReduceIT.java | 2 +- .../metrics/AbstractGeoTestCase.java | 2 +- .../elasticsearch/search/geo/GeoFilterIT.java | 4 +- .../search/sort/GeoDistanceIT.java | 4 +- .../ContextCompletionSuggestSearchIT.java | 2 +- .../completion/GeoContextMappingTests.java | 26 +++++------ .../test/geo/RandomGeoGenerator.java | 2 +- ...ers-common-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...ers-common-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...ard-codecs-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...ard-codecs-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...ucene-core-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...ucene-core-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...e-grouping-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...e-grouping-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...ighlighter-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - 
...ighlighter-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...ucene-join-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...ucene-join-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...ene-memory-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...ene-memory-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...ucene-misc-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...ucene-misc-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...ne-queries-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...ne-queries-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...ueryparser-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...ueryparser-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...ne-sandbox-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...ne-sandbox-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...ne-spatial-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...ne-spatial-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...-spatial3d-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...-spatial3d-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...ne-suggest-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...ne-suggest-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...xpressions-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...xpressions-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + .../messy/tests/SimpleSortTests.java | 2 +- ...lyzers-icu-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...lyzers-icu-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...s-kuromoji-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...s-kuromoji-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...s-phonetic-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...s-phonetic-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...rs-smartcn-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...rs-smartcn-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + ...rs-stempel-5.5.0-snapshot-4de5f1d.jar.sha1 | 1 - ...rs-stempel-5.5.0-snapshot-850c6c2.jar.sha1 | 1 + 81 files changed, 232 insertions(+), 185 deletions(-) delete mode 100644 distribution/licenses/lucene-analyzers-common-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-analyzers-common-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-backward-codecs-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-backward-codecs-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-core-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-core-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-grouping-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-grouping-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-highlighter-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-highlighter-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-join-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-join-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-memory-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-memory-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-misc-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-misc-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-queries-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-queries-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-queryparser-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-queryparser-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 
distribution/licenses/lucene-sandbox-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-sandbox-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-spatial-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-spatial-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-spatial3d-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-spatial3d-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 distribution/licenses/lucene-suggest-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 distribution/licenses/lucene-suggest-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.5.0-snapshot-850c6c2.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.5.0-snapshot-4de5f1d.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.5.0-snapshot-850c6c2.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 1c5b1e724fd..110765d3535 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 3.0.0-SNAPSHOT -lucene = 5.5.0-snapshot-4de5f1d +lucene = 5.5.0-snapshot-850c6c2 # optional dependencies spatial4j = 0.5 diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java index 513a7977d67..e233c84f47c 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java +++ b/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java @@ -20,8 +20,11 @@ package org.elasticsearch.common.geo; import org.apache.lucene.util.BitUtil; -import org.apache.lucene.util.GeoHashUtils; -import org.apache.lucene.util.GeoUtils; + +import static org.apache.lucene.spatial.util.GeoHashUtils.mortonEncode; +import static org.apache.lucene.spatial.util.GeoHashUtils.stringEncode; +import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonUnhashLat; +import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonUnhashLon; /** * @@ -81,14 +84,14 @@ public final class GeoPoint { } public GeoPoint resetFromIndexHash(long hash) { - lon = GeoUtils.mortonUnhashLon(hash); - lat = GeoUtils.mortonUnhashLat(hash); + lon = mortonUnhashLon(hash); + lat = mortonUnhashLat(hash); return this; } public GeoPoint resetFromGeoHash(String geohash) { - final long hash = GeoHashUtils.mortonEncode(geohash); - return this.reset(GeoUtils.mortonUnhashLat(hash), 
GeoUtils.mortonUnhashLon(hash)); + final long hash = mortonEncode(geohash); + return this.reset(mortonUnhashLat(hash), mortonUnhashLon(hash)); } public GeoPoint resetFromGeoHash(long geohashLong) { @@ -113,11 +116,11 @@ public final class GeoPoint { } public final String geohash() { - return GeoHashUtils.stringEncode(lon, lat); + return stringEncode(lon, lat); } public final String getGeohash() { - return GeoHashUtils.stringEncode(lon, lat); + return stringEncode(lon, lat); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index c5c36b5b0c2..a0d74f993a5 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.geo; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; -import org.apache.lucene.util.GeoDistanceUtils; import org.apache.lucene.util.SloppyMath; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.unit.DistanceUnit; @@ -29,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; +import static org.apache.lucene.spatial.util.GeoDistanceUtils.maxRadialDistanceMeters; + import java.io.IOException; /** @@ -70,7 +71,7 @@ public class GeoUtils { * maximum distance/radius from the point 'center' before overlapping **/ public static double maxRadialDistance(GeoPoint center, double initialRadius) { - final double maxRadius = GeoDistanceUtils.maxRadialDistanceMeters(center.lon(), center.lat()); + final double maxRadius = maxRadialDistanceMeters(center.lon(), center.lat()); return Math.min(initialRadius, maxRadius); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java index a8114c41f9b..e9dae4970a9 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java @@ -19,10 +19,12 @@ package org.elasticsearch.index.fielddata.plain; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.index.IndexSettings; @@ -45,7 +47,7 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) { return new GeoPointArrayIndexFieldData(indexSettings, fieldType.name(), fieldType.fieldDataType(), cache, - breakerService, fieldType.fieldDataType().getSettings() - .getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).before(Version.V_2_2_0) || - indexSettings.getIndexVersionCreated().before(Version.V_2_2_0)); + breakerService); } } public GeoPointArrayIndexFieldData(IndexSettings indexSettings, String fieldName, - FieldDataType 
fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService, - final boolean indexCreatedBefore22) { + FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService) { super(indexSettings, fieldName, fieldDataType, cache); this.breakerService = breakerService; - this.indexCreatedBefore22 = indexCreatedBefore22; } @Override @@ -82,7 +78,8 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData estimator.afterLoad(null, data.ramBytesUsed()); return data; } - return (indexCreatedBefore22 == true) ? loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data); + return (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0) == true) ? + loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data); } /** @@ -95,7 +92,9 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO); boolean success = false; try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio)) { - final GeoPointTermsEnum iter = new GeoPointTermsEnum(builder.buildFromTerms(OrdinalsBuilder.wrapNumeric64Bit(terms.iterator()))); + final GeoPointField.TermEncoding termEncoding = indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_3_0) ? + GeoPointField.TermEncoding.PREFIX : GeoPointField.TermEncoding.NUMERIC; + final GeoPointTermsEnum iter = new GeoPointTermsEnum(builder.buildFromTerms(OrdinalsBuilder.wrapNumeric64Bit(terms.iterator())), termEncoding); Long hashedPoint; long numTerms = 0; while ((hashedPoint = iter.next()) != null) { @@ -181,4 +180,4 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData } } } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java index 426e64ceb12..69d5f60f114 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java @@ -21,7 +21,8 @@ package org.elasticsearch.index.mapper.geo; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.util.GeoHashUtils; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; +import org.apache.lucene.spatial.util.GeoHashUtils; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; @@ -29,7 +30,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index 71309d2fa2d..9c8b4a1f468 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -20,9 +20,10 @@ package org.elasticsearch.index.mapper.geo; import 
org.apache.lucene.document.FieldType; -import org.apache.lucene.document.GeoPointField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; +import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; @@ -59,8 +60,6 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper { FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); FIELD_TYPE.setTokenized(false); FIELD_TYPE.setOmitNorms(true); - FIELD_TYPE.setNumericType(FieldType.NumericType.LONG); - FIELD_TYPE.setNumericPrecisionStep(GeoPointField.PRECISION_STEP); FIELD_TYPE.setDocValuesType(DocValuesType.SORTED_NUMERIC); FIELD_TYPE.setHasDocValues(true); FIELD_TYPE.freeze(); @@ -83,6 +82,10 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper { DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, CopyTo copyTo) { fieldType.setTokenized(false); + if (context.indexCreatedVersion().before(Version.V_2_3_0)) { + fieldType.setNumericPrecisionStep(GeoPointField.PRECISION_STEP); + fieldType.setNumericType(FieldType.NumericType.LONG); + } setupFieldType(context); return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, copyTo); @@ -90,6 +93,10 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper { @Override public GeoPointFieldMapper build(BuilderContext context) { + if (context.indexCreatedVersion().before(Version.V_2_3_0)) { + fieldType.setNumericPrecisionStep(GeoPointField.PRECISION_STEP); + fieldType.setNumericType(FieldType.NumericType.LONG); + } return super.build(context); } } diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index a0246f1f834..05c2a74bb9f 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -19,7 +19,8 @@ package org.elasticsearch.index.query; -import org.apache.lucene.search.GeoPointInBBoxQuery; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; +import org.apache.lucene.spatial.geopoint.search.GeoPointInBBoxQuery; import org.apache.lucene.search.Query; import org.elasticsearch.Version; import org.elasticsearch.common.Numbers; @@ -105,7 +106,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder use prefix encoded postings format + final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ? 
+ GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX; + return new GeoPointInBBoxQuery(fieldType.name(), encoding, luceneTopLeft.lon(), luceneBottomRight.lat(), luceneBottomRight.lon(), luceneTopLeft.lat()); } diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java index 43f55dc034b..784c924efcf 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java @@ -19,7 +19,8 @@ package org.elasticsearch.index.query; -import org.apache.lucene.search.GeoPointDistanceQuery; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; +import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery; import org.apache.lucene.search.Query; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; @@ -229,14 +230,19 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder use prefix encoded postings format + final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ? + GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX; normDistance = GeoUtils.maxRadialDistance(center, normDistance); - return new GeoPointDistanceQuery(fieldType.name(), center.lon(), center.lat(), normDistance); + return new GeoPointDistanceQuery(fieldType.name(), encoding, center.lon(), center.lat(), normDistance); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java index dc1c3d69817..e7b3dca7051 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeQueryBuilder.java @@ -19,9 +19,10 @@ package org.elasticsearch.index.query; -import org.apache.lucene.search.GeoPointDistanceRangeQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.util.GeoDistanceUtils; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; +import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceRangeQuery; +import org.apache.lucene.spatial.util.GeoDistanceUtils; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoDistance; @@ -41,7 +42,7 @@ import java.io.IOException; import java.util.Locale; import java.util.Objects; -import static org.apache.lucene.util.GeoUtils.TOLERANCE; +import static org.apache.lucene.spatial.util.GeoEncodingUtils.TOLERANCE; public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder { @@ -267,16 +268,22 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder use prefix encoded postings format + final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ? + GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX; + + return new GeoPointDistanceRangeQuery(fieldType.name(), encoding, point.lon(), point.lat(), + (includeLower) ? fromValue : fromValue + TOLERANCE, + (includeUpper) ? 
toValue : toValue - TOLERANCE); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java index 8817ac69894..53fab5a3f4a 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java @@ -19,7 +19,8 @@ package org.elasticsearch.index.query; -import org.apache.lucene.search.GeoPointInPolygonQuery; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; +import org.apache.lucene.spatial.geopoint.search.GeoPointInPolygonQuery; import org.apache.lucene.search.Query; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; @@ -136,7 +137,8 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder use prefix encoded postings format + final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ? + GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX; + return new GeoPointInPolygonQuery(fieldType.name(), encoding, lons, lats); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/query/GeohashCellQuery.java b/core/src/main/java/org/elasticsearch/index/query/GeohashCellQuery.java index 07e92a6dc16..9f0d259374b 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeohashCellQuery.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeohashCellQuery.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; -import org.apache.lucene.util.GeoHashUtils; +import org.apache.lucene.spatial.util.GeoHashUtils; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java index 6473b5ae7f2..e1c52d50e8d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.util.GeoHashUtils; +import org.apache.lucene.spatial.util.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.MultiGeoPointValues; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java index e4c3fa2a521..538f1cb9650 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.geogrid; -import org.apache.lucene.util.GeoHashUtils; +import org.apache.lucene.spatial.util.GeoHashUtils; import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.StreamInput; diff --git 
a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java index b5739f53f46..064ee1f5da9 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.metrics.geocentroid; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.util.GeoUtils; +import org.apache.lucene.spatial.util.GeoEncodingUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; @@ -95,7 +95,7 @@ public final class GeoCentroidAggregator extends MetricsAggregator { pt[0] = pt[0] + (value.getLon() - pt[0]) / ++prevCounts; pt[1] = pt[1] + (value.getLat() - pt[1]) / prevCounts; } - centroids.set(bucket, GeoUtils.mortonHash(pt[0], pt[1])); + centroids.set(bucket, GeoEncodingUtils.mortonHash(pt[0], pt[1])); } } }; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java index b9eeb19354c..36a04e7e083 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.geocentroid; -import org.apache.lucene.util.GeoUtils; +import org.apache.lucene.spatial.util.GeoEncodingUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -140,7 +140,7 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G out.writeVLong(count); if (centroid != null) { out.writeBoolean(true); - out.writeLong(GeoUtils.mortonHash(centroid.lon(), centroid.lat())); + out.writeLong(GeoEncodingUtils.mortonHash(centroid.lon(), centroid.lat())); } else { out.writeBoolean(false); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatter.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatter.java index a4b6c2cf282..555256b6810 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatter.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatter.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.support.format; -import org.apache.lucene.util.GeoHashUtils; +import org.apache.lucene.spatial.util.GeoHashUtils; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index f2f3d10215d..4af90ab24a2 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ 
b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.suggest.completion.context; import org.apache.lucene.document.StringField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.util.GeoHashUtils; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; @@ -44,6 +43,9 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import static org.apache.lucene.spatial.util.GeoHashUtils.addNeighbors; +import static org.apache.lucene.spatial.util.GeoHashUtils.stringEncode; + /** * A {@link ContextMapping} that uses a geo location/area as a * criteria. @@ -150,7 +152,7 @@ public class GeoContextMapping extends ContextMapping { if (parser.nextToken() == Token.VALUE_NUMBER) { double lat = parser.doubleValue(); if (parser.nextToken() == Token.END_ARRAY) { - contexts.add(GeoHashUtils.stringEncode(lon, lat, precision)); + contexts.add(stringEncode(lon, lat, precision)); } else { throw new ElasticsearchParseException("only two values [lon, lat] expected"); } @@ -160,7 +162,7 @@ public class GeoContextMapping extends ContextMapping { } else { while (token != Token.END_ARRAY) { GeoPoint point = GeoUtils.parseGeoPoint(parser); - contexts.add(GeoHashUtils.stringEncode(point.getLon(), point.getLat(), precision)); + contexts.add(stringEncode(point.getLon(), point.getLat(), precision)); token = parser.nextToken(); } } @@ -171,7 +173,7 @@ public class GeoContextMapping extends ContextMapping { } else { // or a single location GeoPoint point = GeoUtils.parseGeoPoint(parser); - contexts.add(GeoHashUtils.stringEncode(point.getLon(), point.getLat(), precision)); + contexts.add(stringEncode(point.getLon(), point.getLat(), precision)); } return contexts; } @@ -194,7 +196,7 @@ public class GeoContextMapping extends ContextMapping { // we write doc values fields differently: one field for all values, so we need to only care about indexed fields if (lonField.fieldType().docValuesType() == DocValuesType.NONE) { spare.reset(latField.numericValue().doubleValue(), lonField.numericValue().doubleValue()); - geohashes.add(GeoHashUtils.stringEncode(spare.getLon(), spare.getLat(), precision)); + geohashes.add(stringEncode(spare.getLon(), spare.getLat(), precision)); } } } @@ -261,16 +263,16 @@ public class GeoContextMapping extends ContextMapping { } GeoPoint point = queryContext.getGeoPoint(); final Collection locations = new HashSet<>(); - String geoHash = GeoHashUtils.stringEncode(point.getLon(), point.getLat(), minPrecision); + String geoHash = stringEncode(point.getLon(), point.getLat(), minPrecision); locations.add(geoHash); if (queryContext.getNeighbours().isEmpty() && geoHash.length() == this.precision) { - GeoHashUtils.addNeighbors(geoHash, locations); + addNeighbors(geoHash, locations); } else if (queryContext.getNeighbours().isEmpty() == false) { for (Integer neighbourPrecision : queryContext.getNeighbours()) { if (neighbourPrecision < geoHash.length()) { String truncatedGeoHash = geoHash.substring(0, neighbourPrecision); locations.add(truncatedGeoHash); - GeoHashUtils.addNeighbors(truncatedGeoHash, locations); + addNeighbors(truncatedGeoHash, locations); } } } diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index ac7f849cda3..1077554aa23 100644 --- 
a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.0.jar}" { //// Very special jar permissions: //// These are dangerous permissions that we don't want to grant to everything. -grant codeBase "${codebase.lucene-core-5.5.0-snapshot-4de5f1d.jar}" { +grant codeBase "${codebase.lucene-core-5.5.0-snapshot-850c6c2.jar}" { // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die) permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index 629eb75cf4a..5f393afbe62 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${codebase.lucene-test-framework-5.5.0-snapshot-4de5f1d.jar}" { +grant codeBase "${codebase.lucene-test-framework-5.5.0-snapshot-850c6c2.jar}" { // needed by RamUsageTester permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; diff --git a/core/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java b/core/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java index d9d1245fb42..e89aa4a8244 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java @@ -18,13 +18,11 @@ */ package org.elasticsearch.common.geo; -import org.apache.lucene.util.GeoHashUtils; +import org.apache.lucene.spatial.util.GeoHashUtils; import org.elasticsearch.test.ESTestCase; - - /** - * Tests for {@link org.apache.lucene.util.GeoHashUtils} + * Tests for {@link org.apache.lucene.spatial.util.GeoHashUtils} */ public class GeoHashTests extends ESTestCase { public void testGeohashAsLongRoutines() { @@ -60,4 +58,4 @@ public class GeoHashTests extends ESTestCase { } } } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java index 87cf5e1c570..f11d08f8165 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java @@ -20,9 +20,9 @@ package org.elasticsearch.index.fielddata; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -import org.apache.lucene.document.GeoPointField; import org.apache.lucene.document.StringField; -import org.apache.lucene.util.GeoUtils; +import org.apache.lucene.spatial.geopoint.document.GeoPointField; +import org.apache.lucene.spatial.util.GeoUtils; import org.elasticsearch.Version; import org.elasticsearch.common.geo.GeoPoint; @@ -45,7 +45,27 @@ public abstract class AbstractGeoFieldDataTestCase extends AbstractFieldDataImpl if (indexService.getIndexSettings().getIndexVersionCreated().before(Version.V_2_2_0)) { return new StringField(fieldName, point.lat()+","+point.lon(), store); } - return new GeoPointField(fieldName, point.lon(), point.lat(), store); + final 
GeoPointField.TermEncoding termEncoding; + termEncoding = indexService.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_2_3_0) ? + GeoPointField.TermEncoding.PREFIX : GeoPointField.TermEncoding.NUMERIC; + return new GeoPointField(fieldName, point.lon(), point.lat(), termEncoding, store); + } + + @Override + protected boolean hasDocValues() { + // prior to 22 docValues were not required + if (indexService.getIndexSettings().getIndexVersionCreated().before(Version.V_2_2_0)) { + return false; + } + return true; + } + + @Override + protected long minRamBytesUsed() { + if (indexService.getIndexSettings().getIndexVersionCreated().before(Version.V_2_2_0)) { + return super.minRamBytesUsed(); + } + return 0; } @Override diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java index 8b8955d19d7..11688152f94 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.mapper.externalvalues; -import org.apache.lucene.util.GeoUtils; +import org.apache.lucene.spatial.util.GeoEncodingUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; @@ -88,7 +88,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoUtils.mortonHash(51.0, 42.0))); + assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoEncodingUtils.mortonHash(51.0, 42.0))); } assertThat(doc.rootDoc().getField("field.shape"), notNullValue()); @@ -146,7 +146,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoUtils.mortonHash(51.0, 42.0))); + assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoEncodingUtils.mortonHash(51.0, 42.0))); } assertThat(doc.rootDoc().getField("field.shape"), notNullValue()); @@ -208,7 +208,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoUtils.mortonHash(51.0, 42.0))); + assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoEncodingUtils.mortonHash(51.0, 42.0))); } assertThat(doc.rootDoc().getField("field.shape"), notNullValue()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java index db5781a77eb..ed6c574a865 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java @@ -18,8 +18,6 @@ */ package 
org.elasticsearch.index.mapper.geo; -import org.apache.lucene.util.GeoHashUtils; -import org.apache.lucene.util.GeoUtils; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -46,6 +44,8 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonHash; +import static org.apache.lucene.spatial.util.GeoHashUtils.stringEncode; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.hamcrest.Matchers.containsString; @@ -86,7 +86,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (indexCreatedBefore22 == true) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.3, 1.2))); } } @@ -108,7 +108,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { assertThat(doc.rootDoc().getField("point.lat"), notNullValue()); assertThat(doc.rootDoc().getField("point.lon"), notNullValue()); - assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.stringEncode(1.3, 1.2))); + assertThat(doc.rootDoc().get("point.geohash"), equalTo(stringEncode(1.3, 1.2))); } public void testLatLonInOneValueWithGeohash() throws Exception { @@ -128,7 +128,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { assertThat(doc.rootDoc().getField("point.lat"), notNullValue()); assertThat(doc.rootDoc().getField("point.lon"), notNullValue()); - assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.stringEncode(1.3, 1.2))); + assertThat(doc.rootDoc().get("point.geohash"), equalTo(stringEncode(1.3, 1.2))); } public void testGeoHashIndexValue() throws Exception { @@ -142,13 +142,13 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() - .field("point", GeoHashUtils.stringEncode(1.3, 1.2)) + .field("point", stringEncode(1.3, 1.2)) .endObject() .bytes()); assertThat(doc.rootDoc().getField("point.lat"), notNullValue()); assertThat(doc.rootDoc().getField("point.lon"), notNullValue()); - assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.stringEncode(1.3, 1.2))); + assertThat(doc.rootDoc().get("point.geohash"), equalTo(stringEncode(1.3, 1.2))); } public void testGeoHashValue() throws Exception { @@ -162,7 +162,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() - .field("point", GeoHashUtils.stringEncode(1.3, 1.2)) + .field("point", stringEncode(1.3, 1.2)) .endObject() .bytes()); @@ -193,7 +193,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("89.0,1.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoUtils.mortonHash(1.0, 89.0))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.0, 89.0))); } doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() @@ -205,7 +205,7 @@ public class 
GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("-89.0,-1.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoUtils.mortonHash(-1.0, -89.0))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(-1.0, -89.0))); } doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() @@ -217,7 +217,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("-1.0,-179.0")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoUtils.mortonHash(-179.0, -1.0))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(-179.0, -1.0))); } } @@ -350,7 +350,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.3, 1.2))); } } @@ -379,14 +379,14 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.3, 1.2))); } assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4)); assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5)); if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoUtils.mortonHash(1.5, 1.4))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.5, 1.4))); } } @@ -410,7 +410,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.3, 1.2))); } } @@ -436,7 +436,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.3, 1.2))); } } @@ -465,14 +465,14 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), 
equalTo(mortonHash(1.3, 1.2))); } assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4)); assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5)); if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoUtils.mortonHash(1.5, 1.4))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.5, 1.4))); } } @@ -496,7 +496,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.3, 1.2))); } } @@ -521,7 +521,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.3, 1.2))); } } @@ -547,7 +547,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.3, 1.2))); } } @@ -576,14 +576,14 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.3, 1.2))); } assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4)); assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5)); if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoUtils.mortonHash(1.5, 1.4))); + assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.5, 1.4))); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java index c2cbee42cf2..5de6c517ab2 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.mapper.geo; -import org.apache.lucene.util.GeoHashUtils; -import org.apache.lucene.util.GeoUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import 
org.elasticsearch.common.compress.CompressedXContent; @@ -36,6 +34,8 @@ import org.elasticsearch.test.VersionUtils; import java.util.Collection; +import static org.apache.lucene.spatial.util.GeoHashUtils.stringEncode; +import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonHash; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -72,7 +72,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.3, 1.2))); } } @@ -96,7 +96,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase { if (version.before(Version.V_2_2_0)) { assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3")); } else { - assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoUtils.mortonHash(1.3, 1.2))); + assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.3, 1.2))); } } @@ -111,13 +111,13 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase { ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() - .field("point", GeoHashUtils.stringEncode(1.3, 1.2)) + .field("point", stringEncode(1.3, 1.2)) .endObject() .bytes()); assertThat(doc.rootDoc().getField("point.lat"), nullValue()); assertThat(doc.rootDoc().getField("point.lon"), nullValue()); - assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.stringEncode(1.3, 1.2))); + assertThat(doc.rootDoc().get("point.geohash"), equalTo(stringEncode(1.3, 1.2))); assertThat(doc.rootDoc().get("point"), notNullValue()); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java index 82bd78b4967..29726efaad1 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java @@ -21,13 +21,9 @@ package org.elasticsearch.index.mapper.multifield; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.util.GeoUtils; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -38,15 +34,11 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParseContext.Document; -import org.elasticsearch.index.mapper.core.CompletionFieldMapper; import org.elasticsearch.index.mapper.core.DateFieldMapper; -import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.core.TokenCountFieldMapper; -import 
org.elasticsearch.index.mapper.geo.BaseGeoPointFieldMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.Arrays; diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java index f16e00416b2..290a05d1fa1 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java @@ -24,9 +24,9 @@ import com.spatial4j.core.shape.Rectangle; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.GeoPointInBBoxQuery; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.spatial.geopoint.search.GeoPointInBBoxQuery; import org.elasticsearch.Version; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java index 7511915c49c..e7b2d862f5b 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java @@ -20,9 +20,9 @@ package org.elasticsearch.index.query; import com.spatial4j.core.shape.Point; -import org.apache.lucene.search.GeoPointDistanceQuery; +import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.util.GeoUtils; +import org.apache.lucene.spatial.util.GeoEncodingUtils; import org.elasticsearch.Version; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; @@ -205,7 +205,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase 0; precision--) { - String hash = GeoHashUtils.stringEncode(lng, lat, precision); + for (int precision = PRECISION - 1; precision > 0; precision--) { + String hash = stringEncode(lng, lat, precision); if ((smallestGeoHash == null) || (hash.length() < smallestGeoHash.length())) { smallestGeoHash = hash; } @@ -128,8 +129,8 @@ public class GeoHashGridIT extends ESIntegTestCase { double lng = (360d * random.nextDouble()) - 180d; points.add(lat + "," + lng); // Update expected doc counts for all resolutions.. 
- for (int precision = GeoHashUtils.PRECISION; precision > 0; precision--) { - final String geoHash = GeoHashUtils.stringEncode(lng, lat, precision); + for (int precision = PRECISION; precision > 0; precision--) { + final String geoHash = stringEncode(lng, lat, precision); geoHashes.add(geoHash); } } @@ -144,7 +145,7 @@ public class GeoHashGridIT extends ESIntegTestCase { } public void testSimple() throws Exception { - for (int precision = 1; precision <= GeoHashUtils.PRECISION; precision++) { + for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx") .addAggregation(geohashGrid("geohashgrid") .field("location") @@ -168,14 +169,14 @@ public class GeoHashGridIT extends ESIntegTestCase { assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); GeoPoint geoPoint = (GeoPoint) propertiesKeys[i]; - assertThat(GeoHashUtils.stringEncode(geoPoint.lon(), geoPoint.lat(), precision), equalTo(geohash)); + assertThat(stringEncode(geoPoint.lon(), geoPoint.lat(), precision), equalTo(geohash)); assertThat((long) propertiesDocCounts[i], equalTo(bucketCount)); } } } public void testMultivalued() throws Exception { - for (int precision = 1; precision <= GeoHashUtils.PRECISION; precision++) { + for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("multi_valued_idx") .addAggregation(geohashGrid("geohashgrid") .field("location") @@ -201,7 +202,7 @@ public class GeoHashGridIT extends ESIntegTestCase { public void testFiltered() throws Exception { GeoBoundingBoxQueryBuilder bbox = new GeoBoundingBoxQueryBuilder("location"); bbox.setCorners(smallestGeoHash, smallestGeoHash).queryName("bbox"); - for (int precision = 1; precision <= GeoHashUtils.PRECISION; precision++) { + for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx") .addAggregation( AggregationBuilders.filter("filtered").filter(bbox) @@ -232,7 +233,7 @@ public class GeoHashGridIT extends ESIntegTestCase { } public void testUnmapped() throws Exception { - for (int precision = 1; precision <= GeoHashUtils.PRECISION; precision++) { + for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx_unmapped") .addAggregation(geohashGrid("geohashgrid") .field("location") @@ -249,7 +250,7 @@ public class GeoHashGridIT extends ESIntegTestCase { } public void testPartiallyUnmapped() throws Exception { - for (int precision = 1; precision <= GeoHashUtils.PRECISION; precision++) { + for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") .addAggregation(geohashGrid("geohashgrid") .field("location") @@ -273,7 +274,7 @@ public class GeoHashGridIT extends ESIntegTestCase { } public void testTopMatch() throws Exception { - for (int precision = 1; precision <= GeoHashUtils.PRECISION; precision++) { + for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx") .addAggregation(geohashGrid("geohashgrid") .field("location") @@ -306,7 +307,7 @@ public class GeoHashGridIT extends ESIntegTestCase { // making sure this doesn't runs into an OOME public void testSizeIsZero() { - for (int precision = 1; precision <= GeoHashUtils.PRECISION; precision++) { + for (int precision = 1; precision <= PRECISION; precision++) { final int size = randomBoolean() ? 
0 : randomIntBetween(1, Integer.MAX_VALUE); final int shardSize = randomBoolean() ? -1 : 0; SearchResponse response = client().prepareSearch("idx") diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java index d138c0ccd3e..3e2e1319019 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket; -import org.apache.lucene.util.GeoHashUtils; +import org.apache.lucene.spatial.util.GeoHashUtils; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.QueryBuilders; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java index cc0e8b2050e..26cb3e6ad54 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -23,7 +23,7 @@ import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.ObjectIntMap; import com.carrotsearch.hppc.ObjectObjectHashMap; import com.carrotsearch.hppc.ObjectObjectMap; -import org.apache.lucene.util.GeoHashUtils; +import org.apache.lucene.spatial.util.GeoHashUtils; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.geo.GeoPoint; diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java index ac8e9e029b5..8a060af2ab0 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java @@ -29,8 +29,8 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; import org.apache.lucene.spatial.query.UnsupportedSpatialOperation; -import org.apache.lucene.util.GeoHashUtils; -import org.apache.lucene.util.GeoProjectionUtils; +import org.apache.lucene.spatial.util.GeoHashUtils; +import org.apache.lucene.spatial.util.GeoProjectionUtils; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.bulk.BulkItemResponse; diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java index ac8e988394a..5b0bc4d9ffe 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.sort; -import org.apache.lucene.util.GeoHashUtils; +import org.apache.lucene.spatial.util.GeoHashUtils; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -34,8 +34,6 @@ import org.elasticsearch.index.query.GeoDistanceQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import 
org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.sort.SortBuilders; -import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; diff --git a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index 18d6d9b99f9..7096a0f34ac 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.suggest; import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import org.apache.lucene.util.GeoHashUtils; +import org.apache.lucene.spatial.util.GeoHashUtils; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.suggest.SuggestResponse; diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java index b42af82433b..471de9c3e93 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.suggest.completion; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.util.GeoHashUtils; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -39,6 +38,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; +import static org.apache.lucene.spatial.util.GeoHashUtils.addNeighbors; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.suggest.completion.CategoryContextMappingTests.assertContextSuggestFields; import static org.hamcrest.Matchers.equalTo; @@ -206,7 +206,7 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { assertThat(queryContexts.size(), equalTo(1 + 8)); Collection locations = new ArrayList<>(); locations.add("ezs42e"); - GeoHashUtils.addNeighbors("ezs42e", GeoContextMapping.DEFAULT_PRECISION, locations); + addNeighbors("ezs42e", GeoContextMapping.DEFAULT_PRECISION, locations); for (ContextMapping.QueryContext queryContext : queryContexts) { assertThat(queryContext.context, isIn(locations)); assertThat(queryContext.boost, equalTo(1)); @@ -225,7 +225,7 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { assertThat(queryContexts.size(), equalTo(1 + 8)); Collection locations = new ArrayList<>(); locations.add("wh0n94"); - GeoHashUtils.addNeighbors("wh0n94", GeoContextMapping.DEFAULT_PRECISION, locations); + addNeighbors("wh0n94", GeoContextMapping.DEFAULT_PRECISION, locations); for (ContextMapping.QueryContext queryContext : queryContexts) { assertThat(queryContext.context, isIn(locations)); assertThat(queryContext.boost, equalTo(1)); @@ -249,11 +249,11 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { Collection locations = new ArrayList<>(); locations.add("wh0n94"); locations.add("w"); - GeoHashUtils.addNeighbors("w", 1, 
locations); + addNeighbors("w", 1, locations); locations.add("wh"); - GeoHashUtils.addNeighbors("wh", 2, locations); + addNeighbors("wh", 2, locations); locations.add("wh0"); - GeoHashUtils.addNeighbors("wh0", 3, locations); + addNeighbors("wh0", 3, locations); for (ContextMapping.QueryContext queryContext : queryContexts) { assertThat(queryContext.context, isIn(locations)); assertThat(queryContext.boost, equalTo(10)); @@ -287,15 +287,15 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { Collection firstLocations = new ArrayList<>(); firstLocations.add("wh0n94"); firstLocations.add("w"); - GeoHashUtils.addNeighbors("w", 1, firstLocations); + addNeighbors("w", 1, firstLocations); firstLocations.add("wh"); - GeoHashUtils.addNeighbors("wh", 2, firstLocations); + addNeighbors("wh", 2, firstLocations); firstLocations.add("wh0"); - GeoHashUtils.addNeighbors("wh0", 3, firstLocations); + addNeighbors("wh0", 3, firstLocations); Collection secondLocations = new ArrayList<>(); secondLocations.add("w5cx04"); secondLocations.add("w5cx0"); - GeoHashUtils.addNeighbors("w5cx0", 5, secondLocations); + addNeighbors("w5cx0", 5, secondLocations); for (ContextMapping.QueryContext queryContext : queryContexts) { if (firstLocations.contains(queryContext.context)) { assertThat(queryContext.boost, equalTo(10)); @@ -330,12 +330,12 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { Collection firstLocations = new ArrayList<>(); firstLocations.add("wh0n94"); firstLocations.add("w"); - GeoHashUtils.addNeighbors("w", 1, firstLocations); + addNeighbors("w", 1, firstLocations); firstLocations.add("wh"); - GeoHashUtils.addNeighbors("wh", 2, firstLocations); + addNeighbors("wh", 2, firstLocations); Collection secondLocations = new ArrayList<>(); secondLocations.add("w5cx04"); - GeoHashUtils.addNeighbors("w5cx04", 6, secondLocations); + addNeighbors("w5cx04", 6, secondLocations); for (ContextMapping.QueryContext queryContext : queryContexts) { if (firstLocations.contains(queryContext.context)) { assertThat(queryContext.boost, equalTo(10)); diff --git a/core/src/test/java/org/elasticsearch/test/geo/RandomGeoGenerator.java b/core/src/test/java/org/elasticsearch/test/geo/RandomGeoGenerator.java index ad94c4e5ab4..fc1267a8bf0 100644 --- a/core/src/test/java/org/elasticsearch/test/geo/RandomGeoGenerator.java +++ b/core/src/test/java/org/elasticsearch/test/geo/RandomGeoGenerator.java @@ -19,7 +19,7 @@ package org.elasticsearch.test.geo; -import org.apache.lucene.util.GeoUtils; +import org.apache.lucene.spatial.util.GeoUtils; import org.elasticsearch.common.geo.GeoPoint; import java.util.Random; diff --git a/distribution/licenses/lucene-analyzers-common-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-analyzers-common-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index 162824e5b15..00000000000 --- a/distribution/licenses/lucene-analyzers-common-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c1a6adaf97f1f341b311ddf050d2b19c79fb1945 \ No newline at end of file diff --git a/distribution/licenses/lucene-analyzers-common-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-analyzers-common-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..24263216da9 --- /dev/null +++ b/distribution/licenses/lucene-analyzers-common-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +94f03500c4b0256199b4dfcecf20be5b71c29177 \ No newline at end of file diff --git a/distribution/licenses/lucene-backward-codecs-5.5.0-snapshot-4de5f1d.jar.sha1 
b/distribution/licenses/lucene-backward-codecs-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index 56a223db820..00000000000 --- a/distribution/licenses/lucene-backward-codecs-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52e20edd7a5fc828cd19bb49a603d57d7d4f2cd7 \ No newline at end of file diff --git a/distribution/licenses/lucene-backward-codecs-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-backward-codecs-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..c9df04f4d6b --- /dev/null +++ b/distribution/licenses/lucene-backward-codecs-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +44365f83efda343500793c43a16903f2aa74ddbd \ No newline at end of file diff --git a/distribution/licenses/lucene-core-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-core-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index edb26e275b1..00000000000 --- a/distribution/licenses/lucene-core-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c28b1829a7510a59316761f0805072cf7441df24 \ No newline at end of file diff --git a/distribution/licenses/lucene-core-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-core-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..103b8e1258c --- /dev/null +++ b/distribution/licenses/lucene-core-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +7aca3e6bfe610df9cdc1b8fd671eac071016c228 \ No newline at end of file diff --git a/distribution/licenses/lucene-grouping-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-grouping-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index b5d09d3ca8b..00000000000 --- a/distribution/licenses/lucene-grouping-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2e5d4357f2dad4aff99b9457ea916d259cb09f4 \ No newline at end of file diff --git a/distribution/licenses/lucene-grouping-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-grouping-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..861f05f5c5d --- /dev/null +++ b/distribution/licenses/lucene-grouping-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +8c588d4d4c8fc6894dd6725dcf69ffa690c260f7 \ No newline at end of file diff --git a/distribution/licenses/lucene-highlighter-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-highlighter-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index f43270c764f..00000000000 --- a/distribution/licenses/lucene-highlighter-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -112959bececacfeaa72533ac94cca3d3d164550b \ No newline at end of file diff --git a/distribution/licenses/lucene-highlighter-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-highlighter-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..b64c63539c7 --- /dev/null +++ b/distribution/licenses/lucene-highlighter-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +3ccad9ccffe94decc7c8c2a97fee3574c54b804c \ No newline at end of file diff --git a/distribution/licenses/lucene-join-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-join-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index 80f9298ad1d..00000000000 --- a/distribution/licenses/lucene-join-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -975f42fac508bc999386955e449f5b91d123b569 \ No newline at end of file diff --git a/distribution/licenses/lucene-join-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-join-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 
index 00000000000..85c0b7df5e7 --- /dev/null +++ b/distribution/licenses/lucene-join-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +b7eba4721b52f0490e71d8fdbc92112be538592b \ No newline at end of file diff --git a/distribution/licenses/lucene-memory-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-memory-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index 25261388b00..00000000000 --- a/distribution/licenses/lucene-memory-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3744a71c00220ef98dfcffc8265325709224fee5 \ No newline at end of file diff --git a/distribution/licenses/lucene-memory-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-memory-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..492e7193486 --- /dev/null +++ b/distribution/licenses/lucene-memory-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +6dde326efe42926c57dc49153536c689b9951203 \ No newline at end of file diff --git a/distribution/licenses/lucene-misc-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-misc-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index b4832785994..00000000000 --- a/distribution/licenses/lucene-misc-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1fb855fc6711bc977587aecf42060d958f9f32b \ No newline at end of file diff --git a/distribution/licenses/lucene-misc-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-misc-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..ea0e372cd6e --- /dev/null +++ b/distribution/licenses/lucene-misc-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +3b8008f6b4195009960516fb1978912c0e068df2 \ No newline at end of file diff --git a/distribution/licenses/lucene-queries-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-queries-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index 88f7841b967..00000000000 --- a/distribution/licenses/lucene-queries-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74914a9410a5f8a43e72ff77532ae481c61f6384 \ No newline at end of file diff --git a/distribution/licenses/lucene-queries-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-queries-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..5c1d70e5800 --- /dev/null +++ b/distribution/licenses/lucene-queries-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +00c681bca8129811901d2eff850e8b7855385448 \ No newline at end of file diff --git a/distribution/licenses/lucene-queryparser-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-queryparser-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index 43311609747..00000000000 --- a/distribution/licenses/lucene-queryparser-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3a5c7242ecee80e80e5da0ff328897452cbec77 \ No newline at end of file diff --git a/distribution/licenses/lucene-queryparser-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-queryparser-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..c554fcdc765 --- /dev/null +++ b/distribution/licenses/lucene-queryparser-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +f8856c8286fde66ffa3d4745306f3849b4be808b \ No newline at end of file diff --git a/distribution/licenses/lucene-sandbox-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-sandbox-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index a1c02df0e24..00000000000 --- a/distribution/licenses/lucene-sandbox-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-054bd6d6e3762af6828ae29805e2c6ccd136aaf8 \ No newline at end of file diff --git a/distribution/licenses/lucene-sandbox-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-sandbox-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..b986aa67de5 --- /dev/null +++ b/distribution/licenses/lucene-sandbox-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +dd5e43774a033b65c66c5e877104ffaf6a17c0b8 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-spatial-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index f7940115911..00000000000 --- a/distribution/licenses/lucene-spatial-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2580c4ccce1258580dbf8035e9e4ff1cf73b1cff \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-spatial-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..44d3e9f616d --- /dev/null +++ b/distribution/licenses/lucene-spatial-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +29fcb449512c0095e77ad2c96eca03b36e59745f \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial3d-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-spatial3d-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index 37f825eb26e..00000000000 --- a/distribution/licenses/lucene-spatial3d-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -56ddb993dda8b6c0d68d64b1d4be6e088df29669 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial3d-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-spatial3d-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..6ec23a5e5cf --- /dev/null +++ b/distribution/licenses/lucene-spatial3d-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +ea8d939136c58dbc388939ddc50bf9f6315528a4 \ No newline at end of file diff --git a/distribution/licenses/lucene-suggest-5.5.0-snapshot-4de5f1d.jar.sha1 b/distribution/licenses/lucene-suggest-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index b039cbf4d46..00000000000 --- a/distribution/licenses/lucene-suggest-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bce01a0ba74c0df5caaf2b112537024371d03df4 \ No newline at end of file diff --git a/distribution/licenses/lucene-suggest-5.5.0-snapshot-850c6c2.jar.sha1 b/distribution/licenses/lucene-suggest-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..f725c019f7d --- /dev/null +++ b/distribution/licenses/lucene-suggest-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +b6dfab425bb5a0cbaf6adeb9ebec770cdce00046 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-5.5.0-snapshot-4de5f1d.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index 2de57e08624..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -477099ede788272484648ecd05d39d8745c74d6e \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-5.5.0-snapshot-850c6c2.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..c0305515558 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ 
+4017aff15660b508221e482c19ac6323b601229e \ No newline at end of file diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java index 48570de1a81..57bfd3fe10d 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java @@ -40,7 +40,7 @@ import java.util.Collections; import java.util.List; import java.util.Random; -import static org.apache.lucene.util.GeoUtils.TOLERANCE; +import static org.apache.lucene.spatial.util.GeoEncodingUtils.TOLERANCE; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.5.0-snapshot-4de5f1d.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index c0a48ca4b3b..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc33b8449a6423132bf618bb1d32f464d191686d \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.5.0-snapshot-850c6c2.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..39c8b9e5bb9 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +c0d6b8f891a803dc0ce92da01e868a6ef31f0f09 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.5.0-snapshot-4de5f1d.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index c3deab027b7..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d71ffab4f99835d863cd4b7b280469e62a98db61 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.5.0-snapshot-850c6c2.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..a1c1b8ff80e --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +8a8bcbbdc2d44ae64885e1e353b2cb66e1f906f5 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.5.0-snapshot-4de5f1d.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index beca1c6f1b1..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30a9da299d3e4190833aebd07e814ce8fb9e9f78 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.5.0-snapshot-850c6c2.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..36b1fb24495 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +9f176b3bdd40c6ccfcce53e9f4eae5273a71958f \ No 
newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.5.0-snapshot-4de5f1d.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index 7908b9cdab9..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a5f2374bc9180d842e823b681726ae2663ab1ebd \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.5.0-snapshot-850c6c2.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..f58e5538717 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +f2b1d0e000be8bfad3e3c88ba9d19f5b31edf69e \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.5.0-snapshot-4de5f1d.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.5.0-snapshot-4de5f1d.jar.sha1 deleted file mode 100644 index 7b68617f3b8..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.5.0-snapshot-4de5f1d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7d0ae501ad604447e02206f86e6592bcafd6a3f1 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.5.0-snapshot-850c6c2.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.5.0-snapshot-850c6c2.jar.sha1 new file mode 100644 index 00000000000..4b4ed2950fa --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.5.0-snapshot-850c6c2.jar.sha1 @@ -0,0 +1 @@ +619040b891af8d2427a9f324148bb2e491685511 \ No newline at end of file From 8bc2332d9ab028a5415a9606cd349790d3f5dc99 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 11 Feb 2016 09:45:43 +0100 Subject: [PATCH 20/22] Decouple recovery source/target logic and transport piping The current logic for doing recovery from a source to a target shard is tightly coupled with the underlying network pipes. This change decouples the two, making it easier to add unit tests for shard recovery that don't involve the node and network environment. On top of that, RecoveryTarget is renamed to RecoveryTargetService, leaving space for renaming RecoveryStatus to RecoveryTarget (and thus avoiding the confusion we have today with RecoveryState). Correspondingly, RecoverySource is renamed to RecoverySourceService.
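A rough sketch of the shape this decoupling takes, for orientation only: the real interface added by this patch (RecoveryTargetHandler) has more methods (receiveFileInfo, cleanFiles, writeFileChunk) and different signatures, and the production implementation is the transport-backed RemoteRecoveryTargetHandler; the in-memory fake below is hypothetical and only illustrates why unit testing the source-side logic becomes possible without a node or network.

    import java.io.IOException;
    import java.util.List;

    // Simplified stand-in for the RecoveryTargetHandler interface introduced by this patch.
    interface RecoveryTargetHandlerSketch {
        void prepareForTranslogOperations(int totalTranslogOps) throws IOException;
        void indexTranslogOperations(List<String> operations, int totalTranslogOps);
        void finalizeRecovery();
    }

    // In production the implementation wraps the transport layer (see RemoteRecoveryTargetHandler
    // in this patch); a unit test can plug in an in-memory fake instead, so the source-side
    // recovery logic runs without any node or network environment.
    class InMemoryRecoveryTarget implements RecoveryTargetHandlerSketch {
        private final List<String> receivedOps = new java.util.ArrayList<>();

        @Override
        public void prepareForTranslogOperations(int totalTranslogOps) {
            // no-op: nothing to prepare in a unit test
        }

        @Override
        public void indexTranslogOperations(List<String> operations, int totalTranslogOps) {
            receivedOps.addAll(operations); // record what the source side sent for later assertions
        }

        @Override
        public void finalizeRecovery() {
            // no-op: the test asserts against receivedOps
        }
    }
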
Closes #16605 --- .../resources/checkstyle_suppressions.xml | 3 - .../common/util/CancellableThreads.java | 38 +- .../elasticsearch/indices/IndicesModule.java | 4 +- .../cluster/IndicesClusterStateService.java | 16 +- .../recovery/RecoveriesCollection.java | 60 +- .../indices/recovery/RecoverySource.java | 7 +- .../recovery/RecoverySourceHandler.java | 183 ++-- .../indices/recovery/RecoveryStatus.java | 288 ------ .../indices/recovery/RecoveryTarget.java | 817 ++++++++---------- .../recovery/RecoveryTargetHandler.java | 74 ++ .../recovery/RecoveryTargetService.java | 470 ++++++++++ .../recovery/RemoteRecoveryTargetHandler.java | 154 ++++ .../SharedFSRecoverySourceHandler.java | 11 +- .../common/util/CancellableThreadsTests.java | 141 ++- .../index/IndexWithShadowReplicasIT.java | 4 +- .../index/store/CorruptedFileIT.java | 8 +- .../indices/recovery/IndexRecoveryIT.java | 11 +- .../recovery/RecoverySourceHandlerTests.java | 8 +- .../indices/recovery/RecoveryStatusTests.java | 2 +- ...ateTests.java => RecoveryTargetTests.java} | 16 +- .../recovery/RecoveriesCollectionTests.java | 36 +- .../elasticsearch/recovery/RelocationIT.java | 4 +- .../recovery/TruncatedRecoveryIT.java | 4 +- 23 files changed, 1333 insertions(+), 1026 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/indices/recovery/RecoveryStatus.java create mode 100644 core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java create mode 100644 core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java create mode 100644 core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java rename core/src/test/java/org/elasticsearch/indices/recovery/{RecoveryStateTests.java => RecoveryTargetTests.java} (97%) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index b824ad94998..a940da95ce9 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -662,9 +662,6 @@ - - - diff --git a/core/src/main/java/org/elasticsearch/common/util/CancellableThreads.java b/core/src/main/java/org/elasticsearch/common/util/CancellableThreads.java index d45afead715..b6edb2db8da 100644 --- a/core/src/main/java/org/elasticsearch/common/util/CancellableThreads.java +++ b/core/src/main/java/org/elasticsearch/common/util/CancellableThreads.java @@ -80,14 +80,32 @@ public class CancellableThreads { * @param interruptable code to run */ public void execute(Interruptable interruptable) { + try { + executeIO(interruptable); + } catch (IOException e) { + assert false : "the passed interruptable can not result in an IOException"; + throw new RuntimeException("unexpected IO exception", e); + } + } + /** + * run the Interruptable, capturing the executing thread. Concurrent calls to {@link #cancel(String)} will interrupt this thread + * causing the call to prematurely return. 
+ * + * @param interruptable code to run + */ + public void executeIO(IOInterruptable interruptable) throws IOException { boolean wasInterrupted = add(); - RuntimeException throwable = null; + RuntimeException runtimeException = null; + IOException ioException = null; + try { interruptable.run(); } catch (InterruptedException | ThreadInterruptedException e) { // assume this is us and ignore } catch (RuntimeException t) { - throwable = t; + runtimeException = t; + } catch (IOException e) { + ioException = e; } finally { remove(); } @@ -101,10 +119,14 @@ public class CancellableThreads { } synchronized (this) { if (isCancelled()) { - onCancel(reason, throwable); - } else if (throwable != null) { + onCancel(reason, ioException != null ? ioException : runtimeException); + } else if (ioException != null) { // if we're not canceling, we throw the original exception - throw throwable; + throw ioException; + } + if (runtimeException != null) { + // if we're not canceling, we throw the original exception + throw runtimeException; } } } @@ -131,10 +153,14 @@ public class CancellableThreads { } - public interface Interruptable { + public interface Interruptable extends IOInterruptable { void run() throws InterruptedException; } + public interface IOInterruptable { + void run() throws IOException, InterruptedException; + } + public static class ExecutionCancelledException extends ElasticsearchException { public ExecutionCancelledException(String msg) { diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index eab8faab8eb..955a95676bd 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -61,7 +61,7 @@ import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoverySource; -import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; import org.elasticsearch.indices.ttl.IndicesTTLService; @@ -155,7 +155,7 @@ public class IndicesModule extends AbstractModule { bind(IndicesService.class).asEagerSingleton(); bind(RecoverySettings.class).asEagerSingleton(); - bind(RecoveryTarget.class).asEagerSingleton(); + bind(RecoveryTargetService.class).asEagerSingleton(); bind(RecoverySource.class).asEagerSingleton(); bind(IndicesStore.class).asEagerSingleton(); bind(IndicesClusterStateService.class).asEagerSingleton(); diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 98bbd5fe000..7998afb7656 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -63,7 +63,7 @@ import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoverySource; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.indices.recovery.RecoveryTarget; +import 
org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.search.SearchService; import org.elasticsearch.snapshots.RestoreService; @@ -83,7 +83,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent !status.sourceNode().equals(sourceNode))) { + if (recoveryTargetService.cancelRecoveriesForShard(indexShard.shardId(), "recovery source node changed", status -> !status.sourceNode().equals(sourceNode))) { logger.debug("[{}][{}] removing shard (recovery source changed), current [{}], global [{}])", shardRouting.index(), shardRouting.id(), currentRoutingEntry, shardRouting); // closing the shard will also cancel any ongoing recovery. indexService.removeShard(shardRouting.id(), "removing shard (recovery source node changed)"); @@ -609,7 +609,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent onGoingRecoveries = ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap onGoingRecoveries = ConcurrentCollections.newConcurrentMap(); final private ESLogger logger; final private ThreadPool threadPool; @@ -59,9 +59,9 @@ public class RecoveriesCollection { * @return the id of the new recovery. */ public long startRecovery(IndexShard indexShard, DiscoveryNode sourceNode, - RecoveryTarget.RecoveryListener listener, TimeValue activityTimeout) { - RecoveryStatus status = new RecoveryStatus(indexShard, sourceNode, listener); - RecoveryStatus existingStatus = onGoingRecoveries.putIfAbsent(status.recoveryId(), status); + RecoveryTargetService.RecoveryListener listener, TimeValue activityTimeout) { + RecoveryTarget status = new RecoveryTarget(indexShard, sourceNode, listener); + RecoveryTarget existingStatus = onGoingRecoveries.putIfAbsent(status.recoveryId(), status); assert existingStatus == null : "found two RecoveryStatus instances with the same id"; logger.trace("{} started recovery from {}, id [{}]", indexShard.shardId(), sourceNode, status.recoveryId()); threadPool.schedule(activityTimeout, ThreadPool.Names.GENERIC, @@ -70,33 +70,33 @@ public class RecoveriesCollection { } /** - * gets the {@link RecoveryStatus } for a given id. The RecoveryStatus returned has it's ref count already incremented - * to make sure it's safe to use. However, you must call {@link RecoveryStatus#decRef()} when you are done with it, typically + * gets the {@link RecoveryTarget } for a given id. The RecoveryStatus returned has it's ref count already incremented + * to make sure it's safe to use. However, you must call {@link RecoveryTarget#decRef()} when you are done with it, typically * by using this method in a try-with-resources clause. *
* Returns null if recovery is not found */ - public StatusRef getStatus(long id) { - RecoveryStatus status = onGoingRecoveries.get(id); + public RecoveryRef getRecovery(long id) { + RecoveryTarget status = onGoingRecoveries.get(id); if (status != null && status.tryIncRef()) { - return new StatusRef(status); + return new RecoveryRef(status); } return null; } - /** Similar to {@link #getStatus(long)} but throws an exception if no recovery is found */ - public StatusRef getStatusSafe(long id, ShardId shardId) { - StatusRef statusRef = getStatus(id); - if (statusRef == null) { + /** Similar to {@link #getRecovery(long)} but throws an exception if no recovery is found */ + public RecoveryRef getRecoverySafe(long id, ShardId shardId) { + RecoveryRef recoveryRef = getRecovery(id); + if (recoveryRef == null) { throw new IndexShardClosedException(shardId); } - assert statusRef.status().shardId().equals(shardId); - return statusRef; + assert recoveryRef.status().shardId().equals(shardId); + return recoveryRef; } /** cancel the recovery with the given id (if found) and remove it from the recovery collection */ public boolean cancelRecovery(long id, String reason) { - RecoveryStatus removed = onGoingRecoveries.remove(id); + RecoveryTarget removed = onGoingRecoveries.remove(id); boolean cancelled = false; if (removed != null) { logger.trace("{} canceled recovery from {}, id [{}] (reason [{}])", @@ -115,7 +115,7 @@ public class RecoveriesCollection { * @param sendShardFailure true a shard failed message should be sent to the master */ public void failRecovery(long id, RecoveryFailedException e, boolean sendShardFailure) { - RecoveryStatus removed = onGoingRecoveries.remove(id); + RecoveryTarget removed = onGoingRecoveries.remove(id); if (removed != null) { logger.trace("{} failing recovery from {}, id [{}]. Send shard failure: [{}]", removed.shardId(), removed.sourceNode(), removed.recoveryId(), sendShardFailure); removed.fail(e, sendShardFailure); @@ -124,7 +124,7 @@ public class RecoveriesCollection { /** mark the recovery with the given id as done (if found) */ public void markRecoveryAsDone(long id) { - RecoveryStatus removed = onGoingRecoveries.remove(id); + RecoveryTarget removed = onGoingRecoveries.remove(id); if (removed != null) { logger.trace("{} marking recovery from {} as done, id [{}]", removed.shardId(), removed.sourceNode(), removed.recoveryId()); removed.markAsDone(); @@ -151,9 +151,9 @@ public class RecoveriesCollection { * already issued outstanding references. * @return true if a recovery was cancelled */ - public boolean cancelRecoveriesForShard(ShardId shardId, String reason, Predicate shouldCancel) { + public boolean cancelRecoveriesForShard(ShardId shardId, String reason, Predicate shouldCancel) { boolean cancelled = false; - for (RecoveryStatus status : onGoingRecoveries.values()) { + for (RecoveryTarget status : onGoingRecoveries.values()) { if (status.shardId().equals(shardId)) { boolean cancel = false; // if we can't increment the status, the recovery is not there any more. @@ -174,20 +174,20 @@ public class RecoveriesCollection { /** - * a reference to {@link RecoveryStatus}, which implements {@link AutoCloseable}. closing the reference - * causes {@link RecoveryStatus#decRef()} to be called. This makes sure that the underlying resources - * will not be freed until {@link RecoveriesCollection.StatusRef#close()} is called. + * a reference to {@link RecoveryTarget}, which implements {@link AutoCloseable}. 
closing the reference + * causes {@link RecoveryTarget#decRef()} to be called. This makes sure that the underlying resources + * will not be freed until {@link RecoveryRef#close()} is called. */ - public static class StatusRef implements AutoCloseable { + public static class RecoveryRef implements AutoCloseable { - private final RecoveryStatus status; + private final RecoveryTarget status; private final AtomicBoolean closed = new AtomicBoolean(false); /** - * Important: {@link org.elasticsearch.indices.recovery.RecoveryStatus#tryIncRef()} should + * Important: {@link RecoveryTarget#tryIncRef()} should * be *successfully* called on status before */ - public StatusRef(RecoveryStatus status) { + public RecoveryRef(RecoveryTarget status) { this.status = status; this.status.setLastAccessTime(); } @@ -199,7 +199,7 @@ public class RecoveriesCollection { } } - public RecoveryStatus status() { + public RecoveryTarget status() { return status; } } @@ -223,7 +223,7 @@ public class RecoveriesCollection { @Override protected void doRun() throws Exception { - RecoveryStatus status = onGoingRecoveries.get(recoveryId); + RecoveryTarget status = onGoingRecoveries.get(recoveryId); if (status == null) { logger.trace("[monitor] no status found for [{}], shutting down", recoveryId); return; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java index 105dd3c445f..9a5c23fc2e1 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java @@ -120,10 +120,13 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe logger.trace("[{}][{}] starting recovery to {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode()); final RecoverySourceHandler handler; + final RemoteRecoveryTargetHandler recoveryTarget = + new RemoteRecoveryTargetHandler(request.recoveryId(), request.shardId(), transportService, request.targetNode(), + recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime)); if (shard.indexSettings().isOnSharedFilesystem()) { - handler = new SharedFSRecoverySourceHandler(shard, request, recoverySettings, transportService, logger); + handler = new SharedFSRecoverySourceHandler(shard, recoveryTarget, request, logger); } else { - handler = new RecoverySourceHandler(shard, request, recoverySettings, transportService, logger); + handler = new RecoverySourceHandler(shard, recoveryTarget, request, recoverySettings.getChunkSize().bytesAsInt(), logger); } ongoingRecoveries.add(shard, handler); try { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index adb775df27b..b92e2066af2 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -38,7 +38,6 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CancellableThreads; -import org.elasticsearch.common.util.CancellableThreads.Interruptable; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.shard.IllegalIndexShardStateException; 
import org.elasticsearch.index.shard.IndexShard; @@ -47,18 +46,13 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.transport.EmptyTransportResponseHandler; import org.elasticsearch.transport.RemoteTransportException; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportService; import java.io.BufferedOutputStream; import java.io.IOException; import java.io.OutputStream; import java.util.ArrayList; -import java.util.Comparator; import java.util.List; -import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; import java.util.stream.StreamSupport; @@ -82,9 +76,8 @@ public class RecoverySourceHandler { private final int shardId; // Request containing source and target node information private final StartRecoveryRequest request; - private final RecoverySettings recoverySettings; - private final TransportService transportService; private final int chunkSizeInBytes; + private final RecoveryTargetHandler recoveryTarget; protected final RecoveryResponse response; @@ -104,16 +97,17 @@ public class RecoverySourceHandler { } }; - public RecoverySourceHandler(final IndexShard shard, final StartRecoveryRequest request, final RecoverySettings recoverySettings, - final TransportService transportService, final ESLogger logger) { + public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recoveryTarget, + final StartRecoveryRequest request, + final int fileChunkSizeInBytes, + final ESLogger logger) { this.shard = shard; + this.recoveryTarget = recoveryTarget; this.request = request; - this.recoverySettings = recoverySettings; this.logger = logger; - this.transportService = transportService; this.indexName = this.request.shardId().getIndex().getName(); this.shardId = this.request.shardId().id(); - this.chunkSizeInBytes = recoverySettings.getChunkSize().bytesAsInt(); + this.chunkSizeInBytes = fileChunkSizeInBytes; this.response = new RecoveryResponse(); } @@ -200,11 +194,14 @@ public class RecoverySourceHandler { final long numDocsTarget = request.metadataSnapshot().getNumDocs(); final long numDocsSource = recoverySourceMetadata.getNumDocs(); if (numDocsTarget != numDocsSource) { - throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number of docs differ: " + numDocsTarget + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsSource + "(" + request.targetNode().getName() + ")"); + throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number " + + "of docs differ: " + numDocsTarget + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsSource + + "(" + request.targetNode().getName() + ")"); } // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target. 
// so we don't return here - logger.trace("[{}][{}] skipping [phase1] to {} - identical sync id [{}] found on both source and target", indexName, shardId, + logger.trace("[{}][{}] skipping [phase1] to {} - identical sync id [{}] found on both source and target", indexName, + shardId, request.targetNode(), recoverySourceSyncId); } else { final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(request.metadataSnapshot()); @@ -213,7 +210,8 @@ public class RecoverySourceHandler { response.phase1ExistingFileSizes.add(md.length()); existingTotalSize += md.length(); if (logger.isTraceEnabled()) { - logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]", + logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}]," + + " size [{}]", indexName, shardId, request.targetNode(), md.name(), md.checksum(), md.length()); } totalSize += md.length(); @@ -223,7 +221,8 @@ public class RecoverySourceHandler { phase1Files.addAll(diff.missing); for (StoreFileMetaData md : phase1Files) { if (request.metadataSnapshot().asMap().containsKey(md.name())) { - logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", + logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote " + + "[{}], local [{}]", indexName, shardId, request.targetNode(), md.name(), request.metadataSnapshot().asMap().get(md.name()), md); } else { logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote", @@ -237,20 +236,16 @@ public class RecoverySourceHandler { response.phase1TotalSize = totalSize; response.phase1ExistingTotalSize = existingTotalSize; - logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", + logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with " + + "total_size [{}]", indexName, shardId, request.targetNode(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize)); - cancellableThreads.execute(() -> { - RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(), - response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes, - translogView.totalOperations()); - transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, - TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), - EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); - }); + cancellableThreads.execute(() -> + recoveryTarget.receiveFileInfo(response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, + response.phase1ExistingFileSizes, translogView.totalOperations())); // How many bytes we've copied since we last called RateLimiter.pause - final AtomicLong bytesSinceLastPause = new AtomicLong(); - final Function outputStreamFactories = (md) -> new BufferedOutputStream(new RecoveryOutputStream(md, bytesSinceLastPause, translogView), chunkSizeInBytes); + final Function outputStreamFactories = + md -> new BufferedOutputStream(new RecoveryOutputStream(md, translogView), chunkSizeInBytes); sendFiles(store, 
phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories); // Send the CLEAN_FILES request, which takes all of the files that // were transferred and renames them from their temporary file @@ -261,23 +256,19 @@ public class RecoverySourceHandler { // related to this recovery (out of date segments, for example) // are deleted try { - cancellableThreads.execute(() -> { - transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES, - new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata, translogView.totalOperations()), - TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), - EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); - }); - } catch (RemoteTransportException remoteException) { + cancellableThreads.executeIO(() -> recoveryTarget.cleanFiles(translogView.totalOperations(), recoverySourceMetadata)); + } catch (RemoteTransportException | IOException targetException) { final IOException corruptIndexException; // we realized that after the index was copied and we wanted to finalize the recovery // the index was corrupted: // - maybe due to a broken segments file on an empty index (transferred with no checksum) // - maybe due to old segments without checksums or length only checks - if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(remoteException)) != null) { + if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(targetException)) != null) { try { final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot); StoreFileMetaData[] metadata = - StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(size -> new StoreFileMetaData[size]); + StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(size -> new + StoreFileMetaData[size]); ArrayUtil.timSort(metadata, (o1, o2) -> { return Long.compare(o1.length(), o2.length()); // check small files first }); @@ -291,17 +282,18 @@ public class RecoverySourceHandler { } } } catch (IOException ex) { - remoteException.addSuppressed(ex); - throw remoteException; + targetException.addSuppressed(ex); + throw targetException; } // corruption has happened on the way to replica - RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null); - exception.addSuppressed(remoteException); + RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + + "checksums are ok", null); + exception.addSuppressed(targetException); logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK", corruptIndexException, shard.shardId(), request.targetNode()); throw exception; } else { - throw remoteException; + throw targetException; } } } @@ -318,22 +310,14 @@ public class RecoverySourceHandler { } - protected void prepareTargetForTranslog(final int totalTranslogOps) { + protected void prepareTargetForTranslog(final int totalTranslogOps) throws IOException { StopWatch stopWatch = new StopWatch().start(); logger.trace("{} recovery [phase1] to {}: prepare remote engine for translog", request.shardId(), request.targetNode()); final long startEngineStart = stopWatch.totalTime().millis(); - cancellableThreads.execute(new Interruptable() { - @Override - public void run() throws InterruptedException { - // Send a request preparing the new shard's translog to receive - // operations. 
This ensures the shard engine is started and disables - // garbage collection (not the JVM's GC!) of tombstone deletes - transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG, - new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), totalTranslogOps), - TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); - } - }); - + // Send a request preparing the new shard's translog to receive + // operations. This ensures the shard engine is started and disables + // garbage collection (not the JVM's GC!) of tombstone deletes + cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps)); stopWatch.stop(); response.startTime = stopWatch.totalTime().millis() - startEngineStart; @@ -378,20 +362,7 @@ public class RecoverySourceHandler { logger.trace("[{}][{}] finalizing recovery to {}", indexName, shardId, request.targetNode()); - cancellableThreads.execute(new Interruptable() { - @Override - public void run() throws InterruptedException { - // Send the FINALIZE request to the target node. The finalize request - // clears unreferenced translog files, refreshes the engine now that - // new segments are available, and enables garbage collection of - // tombstone files. The shard is also moved to the POST_RECOVERY phase - // during this time - transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE, - new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()), - TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(), - EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); - } - }); + cancellableThreads.execute(recoveryTarget::finalizeRecovery); if (isPrimaryRelocation()) { /** @@ -408,7 +379,7 @@ public class RecoverySourceHandler { } stopWatch.stop(); logger.trace("[{}][{}] finalizing recovery to {}: took [{}]", - indexName, shardId, request.targetNode(), stopWatch.totalTime()); + indexName, shardId, request.targetNode(), stopWatch.totalTime()); } protected boolean isPrimaryRelocation() { @@ -435,12 +406,6 @@ public class RecoverySourceHandler { throw new ElasticsearchException("failed to get next operation from translog", ex); } - final TransportRequestOptions recoveryOptions = TransportRequestOptions.builder() - .withCompress(true) - .withType(TransportRequestOptions.Type.RECOVERY) - .withTimeout(recoverySettings.internalActionLongTimeout()) - .build(); - if (operation == null) { logger.trace("[{}][{}] no translog operations to send to {}", indexName, shardId, request.targetNode()); @@ -464,12 +429,7 @@ public class RecoverySourceHandler { // index docs to replicas while the index files are recovered // the lock can potentially be removed, in which case, it might // make sense to re-enable throttling in this phase - cancellableThreads.execute(() -> { - final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest( - request.recoveryId(), request.shardId(), operations, snapshot.totalOperations()); - transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, - recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); - }); + cancellableThreads.execute(() -> recoveryTarget.indexTranslogOperations(operations, snapshot.totalOperations())); if (logger.isTraceEnabled()) { logger.trace("[{}][{}] sent 
batch of [{}][{}] (total: [{}]) translog operations to {}", indexName, shardId, ops, new ByteSizeValue(size), @@ -489,12 +449,7 @@ public class RecoverySourceHandler { } // send the leftover if (!operations.isEmpty()) { - cancellableThreads.execute(() -> { - RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest( - request.recoveryId(), request.shardId(), operations, snapshot.totalOperations()); - transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, - recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); - }); + cancellableThreads.execute(() -> recoveryTarget.indexTranslogOperations(operations, snapshot.totalOperations())); } if (logger.isTraceEnabled()) { @@ -525,13 +480,11 @@ public class RecoverySourceHandler { final class RecoveryOutputStream extends OutputStream { private final StoreFileMetaData md; - private final AtomicLong bytesSinceLastPause; private final Translog.View translogView; private long position = 0; - RecoveryOutputStream(StoreFileMetaData md, AtomicLong bytesSinceLastPause, Translog.View translogView) { + RecoveryOutputStream(StoreFileMetaData md, Translog.View translogView) { this.md = md; - this.bytesSinceLastPause = bytesSinceLastPause; this.translogView = translogView; } @@ -548,43 +501,10 @@ public class RecoverySourceHandler { } private void sendNextChunk(long position, BytesArray content, boolean lastChunk) throws IOException { - final TransportRequestOptions chunkSendOptions = TransportRequestOptions.builder() - .withCompress(false) // lucene files are already compressed and therefore compressing this won't really help much so we are safing the cpu for other things - .withType(TransportRequestOptions.Type.RECOVERY) - .withTimeout(recoverySettings.internalActionTimeout()) - .build(); - cancellableThreads.execute(() -> { - // Pause using the rate limiter, if desired, to throttle the recovery - final long throttleTimeInNanos; - // always fetch the ratelimiter - it might be updated in real-time on the recovery settings - final RateLimiter rl = recoverySettings.rateLimiter(); - if (rl != null) { - long bytes = bytesSinceLastPause.addAndGet(content.length()); - if (bytes > rl.getMinPauseCheckBytes()) { - // Time to pause - bytesSinceLastPause.addAndGet(-bytes); - try { - throttleTimeInNanos = rl.pause(bytes); - shard.recoveryStats().addThrottleTime(throttleTimeInNanos); - } catch (IOException e) { - throw new ElasticsearchException("failed to pause recovery", e); - } - } else { - throttleTimeInNanos = 0; - } - } else { - throttleTimeInNanos = 0; - } - // Actually send the file chunk to the target node, waiting for it to complete - transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK, - new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, content, lastChunk, - translogView.totalOperations(), - /* we send totalOperations with every request since we collect stats on the target and that way we can - * see how many translog ops we accumulate while copying files across the network. A future optimization - * would be in to restart file copy again (new deltas) if we have too many translog ops are piling up. 
- */ - throttleTimeInNanos), chunkSendOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); - }); + // Actually send the file chunk to the target node, waiting for it to complete + cancellableThreads.executeIO(() -> + recoveryTarget.writeFileChunk(md, position, content, lastChunk, translogView.totalOperations()) + ); if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us throw new IndexShardClosedException(request.shardId()); } @@ -594,7 +514,7 @@ public class RecoverySourceHandler { void sendFiles(Store store, StoreFileMetaData[] files, Function outputStreamFactory) throws Throwable { store.incRef(); try { - ArrayUtil.timSort(files, (a,b) -> Long.compare(a.length(), b.length())); // send smallest first + ArrayUtil.timSort(files, (a, b) -> Long.compare(a.length(), b.length())); // send smallest first for (int i = 0; i < files.length; i++) { final StoreFileMetaData md = files[i]; try (final IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE)) { @@ -609,10 +529,11 @@ public class RecoverySourceHandler { failEngine(corruptIndexException); throw corruptIndexException; } else { // corruption has happened on the way to replica - RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null); + RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + + "checksums are ok", null); exception.addSuppressed(t); logger.warn("{} Remote file corruption on node {}, recovering {}. local checksum OK", - corruptIndexException, shardId, request.targetNode(), md); + corruptIndexException, shardId, request.targetNode(), md); throw exception; } } else { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryStatus.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryStatus.java deleted file mode 100644 index 0064021dd33..00000000000 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryStatus.java +++ /dev/null @@ -1,288 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.indices.recovery; - -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexOutput; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.util.CancellableThreads; -import org.elasticsearch.common.util.concurrent.AbstractRefCounted; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.store.StoreFileMetaData; - -import java.io.IOException; -import java.util.Iterator; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; - -/** - * - */ - - -public class RecoveryStatus extends AbstractRefCounted { - - private final ESLogger logger; - - private final static AtomicLong idGenerator = new AtomicLong(); - - private final String RECOVERY_PREFIX = "recovery."; - - private final ShardId shardId; - private final long recoveryId; - private final IndexShard indexShard; - private final DiscoveryNode sourceNode; - private final String tempFilePrefix; - private final Store store; - private final RecoveryTarget.RecoveryListener listener; - - private final AtomicBoolean finished = new AtomicBoolean(); - - private final ConcurrentMap openIndexOutputs = ConcurrentCollections.newConcurrentMap(); - private final Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums(); - - private final CancellableThreads cancellableThreads = new CancellableThreads(); - - // last time this status was accessed - private volatile long lastAccessTime = System.nanoTime(); - - public RecoveryStatus(IndexShard indexShard, DiscoveryNode sourceNode, RecoveryTarget.RecoveryListener listener) { - - super("recovery_status"); - this.recoveryId = idGenerator.incrementAndGet(); - this.listener = listener; - this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); - this.indexShard = indexShard; - this.sourceNode = sourceNode; - this.shardId = indexShard.shardId(); - this.tempFilePrefix = RECOVERY_PREFIX + indexShard.recoveryState().getTimer().startTime() + "."; - this.store = indexShard.store(); - // make sure the store is not released until we are done. 
- store.incRef(); - indexShard.recoveryStats().incCurrentAsTarget(); - } - - private final Map tempFileNames = ConcurrentCollections.newConcurrentMap(); - - public long recoveryId() { - return recoveryId; - } - - public ShardId shardId() { - return shardId; - } - - public IndexShard indexShard() { - ensureRefCount(); - return indexShard; - } - - public DiscoveryNode sourceNode() { - return this.sourceNode; - } - - public RecoveryState state() { - return indexShard.recoveryState(); - } - - public CancellableThreads CancellableThreads() { - return cancellableThreads; - } - - /** return the last time this RecoveryStatus was used (based on System.nanoTime() */ - public long lastAccessTime() { - return lastAccessTime; - } - - /** sets the lasAccessTime flag to now */ - public void setLastAccessTime() { - lastAccessTime = System.nanoTime(); - } - - public Store store() { - ensureRefCount(); - return store; - } - - public RecoveryState.Stage stage() { - return state().getStage(); - } - - public Store.LegacyChecksums legacyChecksums() { - return legacyChecksums; - } - - /** renames all temporary files to their true name, potentially overriding existing files */ - public void renameAllTempFiles() throws IOException { - ensureRefCount(); - store.renameTempFilesSafe(tempFileNames); - } - - /** - * cancel the recovery. calling this method will clean temporary files and release the store - * unless this object is in use (in which case it will be cleaned once all ongoing users call - * {@link #decRef()} - *
- * if {@link #CancellableThreads()} was used, the threads will be interrupted. - */ - public void cancel(String reason) { - if (finished.compareAndSet(false, true)) { - try { - logger.debug("recovery canceled (reason: [{}])", reason); - cancellableThreads.cancel(reason); - } finally { - // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); - } - } - } - - /** - * fail the recovery and call listener - * - * @param e exception that encapsulating the failure - * @param sendShardFailure indicates whether to notify the master of the shard failure - */ - public void fail(RecoveryFailedException e, boolean sendShardFailure) { - if (finished.compareAndSet(false, true)) { - try { - listener.onRecoveryFailure(state(), e, sendShardFailure); - } finally { - try { - cancellableThreads.cancel("failed recovery [" + ExceptionsHelper.stackTrace(e) + "]"); - } finally { - // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); - } - } - } - } - - /** mark the current recovery as done */ - public void markAsDone() { - if (finished.compareAndSet(false, true)) { - assert tempFileNames.isEmpty() : "not all temporary files are renamed"; - try { - // this might still throw an exception ie. if the shard is CLOSED due to some other event. - // it's safer to decrement the reference in a try finally here. - indexShard.postRecovery("peer recovery done"); - } finally { - // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); - } - listener.onRecoveryDone(state()); - } - } - - /** Get a temporary name for the provided file name. */ - public String getTempNameForFile(String origFile) { - return tempFilePrefix + origFile; - } - - public IndexOutput getOpenIndexOutput(String key) { - ensureRefCount(); - return openIndexOutputs.get(key); - } - - /** remove and {@link org.apache.lucene.store.IndexOutput} for a given file. It is the caller's responsibility to close it */ - public IndexOutput removeOpenIndexOutputs(String name) { - ensureRefCount(); - return openIndexOutputs.remove(name); - } - - /** - * Creates an {@link org.apache.lucene.store.IndexOutput} for the given file name. Note that the - * IndexOutput actually point at a temporary file. - *
- * Note: You can use {@link #getOpenIndexOutput(String)} with the same filename to retrieve the same IndexOutput - * at a later stage - */ - public IndexOutput openAndPutIndexOutput(String fileName, StoreFileMetaData metaData, Store store) throws IOException { - ensureRefCount(); - String tempFileName = getTempNameForFile(fileName); - if (tempFileNames.containsKey(tempFileName)) { - throw new IllegalStateException("output for file [" + fileName + "] has already been created"); - } - // add first, before it's created - tempFileNames.put(tempFileName, fileName); - IndexOutput indexOutput = store.createVerifyingOutput(tempFileName, metaData, IOContext.DEFAULT); - openIndexOutputs.put(fileName, indexOutput); - return indexOutput; - } - - public void resetRecovery() throws IOException { - cleanOpenFiles(); - indexShard().performRecoveryRestart(); - } - - @Override - protected void closeInternal() { - try { - cleanOpenFiles(); - } finally { - // free store. increment happens in constructor - store.decRef(); - indexShard.recoveryStats().decCurrentAsTarget(); - } - } - - protected void cleanOpenFiles() { - // clean open index outputs - Iterator> iterator = openIndexOutputs.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry entry = iterator.next(); - logger.trace("closing IndexOutput file [{}]", entry.getValue()); - try { - entry.getValue().close(); - } catch (Throwable t) { - logger.debug("error while closing recovery output [{}]", t, entry.getValue()); - } - iterator.remove(); - } - // trash temporary files - for (String file : tempFileNames.keySet()) { - logger.trace("cleaning temporary file [{}]", file); - store.deleteQuiet(file); - } - legacyChecksums.clear(); - } - - @Override - public String toString() { - return shardId + " [" + recoveryId + "]"; - } - - private void ensureRefCount() { - if (refCount() <= 0) { - throw new ElasticsearchException("RecoveryStatus is used but it's refcount is 0. 
Probably a mismatch between incRef/decRef calls"); - } - } - -} diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 727bd0b6441..ec40a0431c1 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -22,505 +22,392 @@ package org.elasticsearch.indices.recovery; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; -import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.store.RateLimiter; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CancellableThreads; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.engine.RecoveryEngineException; -import org.elasticsearch.index.mapper.MapperException; -import org.elasticsearch.index.shard.IllegalIndexShardStateException; -import org.elasticsearch.index.shard.IndexEventListener; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.store.Store; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.FutureTransportResponseHandler; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportService; +import org.elasticsearch.index.store.StoreFileMetaData; +import org.elasticsearch.index.translog.Translog; import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Predicate; - 
-import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; /** - * The recovery target handles recoveries of peer shards of the shard+node to recover to. - *

- * Note, it can be safely assumed that there will only be a single recovery per shard (index+id) and - * not several of them (since we don't allocate several shard replicas to the same node). + * */ -public class RecoveryTarget extends AbstractComponent implements IndexEventListener { - public static class Actions { - public static final String FILES_INFO = "internal:index/shard/recovery/filesInfo"; - public static final String FILE_CHUNK = "internal:index/shard/recovery/file_chunk"; - public static final String CLEAN_FILES = "internal:index/shard/recovery/clean_files"; - public static final String TRANSLOG_OPS = "internal:index/shard/recovery/translog_ops"; - public static final String PREPARE_TRANSLOG = "internal:index/shard/recovery/prepare_translog"; - public static final String FINALIZE = "internal:index/shard/recovery/finalize"; + +public class RecoveryTarget extends AbstractRefCounted implements RecoveryTargetHandler { + + private final ESLogger logger; + + private final static AtomicLong idGenerator = new AtomicLong(); + + private final String RECOVERY_PREFIX = "recovery."; + + private final ShardId shardId; + private final long recoveryId; + private final IndexShard indexShard; + private final DiscoveryNode sourceNode; + private final String tempFilePrefix; + private final Store store; + private final RecoveryTargetService.RecoveryListener listener; + + private final AtomicBoolean finished = new AtomicBoolean(); + + private final ConcurrentMap openIndexOutputs = ConcurrentCollections.newConcurrentMap(); + private final Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums(); + + private final CancellableThreads cancellableThreads = new CancellableThreads(); + + // last time this status was accessed + private volatile long lastAccessTime = System.nanoTime(); + + private final Map tempFileNames = ConcurrentCollections.newConcurrentMap(); + + public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, RecoveryTargetService.RecoveryListener listener) { + + super("recovery_status"); + this.recoveryId = idGenerator.incrementAndGet(); + this.listener = listener; + this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + this.indexShard = indexShard; + this.sourceNode = sourceNode; + this.shardId = indexShard.shardId(); + this.tempFilePrefix = RECOVERY_PREFIX + indexShard.recoveryState().getTimer().startTime() + "."; + this.store = indexShard.store(); + indexShard.recoveryStats().incCurrentAsTarget(); + // make sure the store is not released until we are done. 
+ store.incRef(); } - private final ThreadPool threadPool; - - private final TransportService transportService; - - private final RecoverySettings recoverySettings; - private final ClusterService clusterService; - - private final RecoveriesCollection onGoingRecoveries; - - @Inject - public RecoveryTarget(Settings settings, ThreadPool threadPool, TransportService transportService, RecoverySettings recoverySettings, ClusterService clusterService) { - super(settings); - this.threadPool = threadPool; - this.transportService = transportService; - this.recoverySettings = recoverySettings; - this.clusterService = clusterService; - this.onGoingRecoveries = new RecoveriesCollection(logger, threadPool); - - transportService.registerRequestHandler(Actions.FILES_INFO, RecoveryFilesInfoRequest::new, ThreadPool.Names.GENERIC, new FilesInfoRequestHandler()); - transportService.registerRequestHandler(Actions.FILE_CHUNK, RecoveryFileChunkRequest::new, ThreadPool.Names.GENERIC, new FileChunkTransportRequestHandler()); - transportService.registerRequestHandler(Actions.CLEAN_FILES, RecoveryCleanFilesRequest::new, ThreadPool.Names.GENERIC, new CleanFilesRequestHandler()); - transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, RecoveryPrepareForTranslogOperationsRequest::new, ThreadPool.Names.GENERIC, new PrepareForTranslogOperationsRequestHandler()); - transportService.registerRequestHandler(Actions.TRANSLOG_OPS, RecoveryTranslogOperationsRequest::new, ThreadPool.Names.GENERIC, new TranslogOperationsRequestHandler()); - transportService.registerRequestHandler(Actions.FINALIZE, RecoveryFinalizeRecoveryRequest::new, ThreadPool.Names.GENERIC, new FinalizeRecoveryRequestHandler()); + public long recoveryId() { + return recoveryId; } - @Override - public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { - if (indexShard != null) { - onGoingRecoveries.cancelRecoveriesForShard(shardId, "shard closed"); + public ShardId shardId() { + return shardId; + } + + public IndexShard indexShard() { + ensureRefCount(); + return indexShard; + } + + public DiscoveryNode sourceNode() { + return this.sourceNode; + } + + public RecoveryState state() { + return indexShard.recoveryState(); + } + + public CancellableThreads CancellableThreads() { + return cancellableThreads; + } + + /** return the last time this RecoveryStatus was used (based on System.nanoTime() */ + public long lastAccessTime() { + return lastAccessTime; + } + + /** sets the lasAccessTime flag to now */ + public void setLastAccessTime() { + lastAccessTime = System.nanoTime(); + } + + public Store store() { + ensureRefCount(); + return store; + } + + public RecoveryState.Stage stage() { + return state().getStage(); + } + + public Store.LegacyChecksums legacyChecksums() { + return legacyChecksums; + } + + /** renames all temporary files to their true name, potentially overriding existing files */ + public void renameAllTempFiles() throws IOException { + ensureRefCount(); + store.renameTempFilesSafe(tempFileNames); + } + + /** + * cancel the recovery. calling this method will clean temporary files and release the store + * unless this object is in use (in which case it will be cleaned once all ongoing users call + * {@link #decRef()} + *

+ * if {@link #CancellableThreads()} was used, the threads will be interrupted. + */ + public void cancel(String reason) { + if (finished.compareAndSet(false, true)) { + try { + logger.debug("recovery canceled (reason: [{}])", reason); + cancellableThreads.cancel(reason); + } finally { + // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now + decRef(); + } } } /** - * cancel all ongoing recoveries for the given shard, if their status match a predicate + * fail the recovery and call listener * - * @param reason reason for cancellation - * @param shardId shardId for which to cancel recoveries - * @param shouldCancel a predicate to check if a recovery should be cancelled or not. Null means cancel without an extra check. - * note that the recovery state can change after this check, but before it is being cancelled via other - * already issued outstanding references. - * @return true if a recovery was cancelled + * @param e exception that encapsulating the failure + * @param sendShardFailure indicates whether to notify the master of the shard failure */ - public boolean cancelRecoveriesForShard(ShardId shardId, String reason, @Nullable Predicate shouldCancel) { - return onGoingRecoveries.cancelRecoveriesForShard(shardId, reason, shouldCancel); - } - - public void startRecovery(final IndexShard indexShard, final RecoveryState.Type recoveryType, final DiscoveryNode sourceNode, final RecoveryListener listener) { - // create a new recovery status, and process... - final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout()); - threadPool.generic().execute(new RecoveryRunner(recoveryId)); - } - - protected void retryRecovery(final RecoveryStatus recoveryStatus, final Throwable reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) { - logger.trace("will retry recovery with id [{}] in [{}]", reason, recoveryStatus.recoveryId(), retryAfter); - retryRecovery(recoveryStatus, retryAfter, currentRequest); - } - - protected void retryRecovery(final RecoveryStatus recoveryStatus, final String reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) { - logger.trace("will retry recovery with id [{}] in [{}] (reason [{}])", recoveryStatus.recoveryId(), retryAfter, reason); - retryRecovery(recoveryStatus, retryAfter, currentRequest); - } - - private void retryRecovery(final RecoveryStatus recoveryStatus, TimeValue retryAfter, final StartRecoveryRequest currentRequest) { - try { - recoveryStatus.resetRecovery(); - } catch (Throwable e) { - onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(currentRequest, e), true); - } - threadPool.schedule(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(recoveryStatus.recoveryId())); - } - - private void doRecovery(final RecoveryStatus recoveryStatus) { - assert recoveryStatus.sourceNode() != null : "can't do a recovery without a source node"; - - logger.trace("collecting local files for {}", recoveryStatus); - Store.MetadataSnapshot metadataSnapshot = null; - try { - metadataSnapshot = recoveryStatus.store().getMetadataOrEmpty(); - } catch (IOException e) { - logger.warn("error while listing local files, recover as if there are none", e); - metadataSnapshot = Store.MetadataSnapshot.EMPTY; - } catch (Exception e) { - // this will be logged as warning later on... 
- logger.trace("unexpected error while listing local files, failing recovery", e); - onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), - new RecoveryFailedException(recoveryStatus.state(), "failed to list local files", e), true); - return; - } - final StartRecoveryRequest request = new StartRecoveryRequest(recoveryStatus.shardId(), recoveryStatus.sourceNode(), clusterService.localNode(), - metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId()); - - final AtomicReference responseHolder = new AtomicReference<>(); - try { - logger.trace("[{}][{}] starting recovery from {}", request.shardId().getIndex().getName(), request.shardId().id(), request.sourceNode()); - recoveryStatus.indexShard().prepareForIndexRecovery(); - recoveryStatus.CancellableThreads().execute(new CancellableThreads.Interruptable() { - @Override - public void run() throws InterruptedException { - responseHolder.set(transportService.submitRequest(request.sourceNode(), RecoverySource.Actions.START_RECOVERY, request, new FutureTransportResponseHandler() { - @Override - public RecoveryResponse newInstance() { - return new RecoveryResponse(); - } - }).txGet()); - } - }); - final RecoveryResponse recoveryResponse = responseHolder.get(); - assert responseHolder != null; - final TimeValue recoveryTime = new TimeValue(recoveryStatus.state().getTimer().time()); - // do this through ongoing recoveries to remove it from the collection - onGoingRecoveries.markRecoveryAsDone(recoveryStatus.recoveryId()); - if (logger.isTraceEnabled()) { - StringBuilder sb = new StringBuilder(); - sb.append('[').append(request.shardId().getIndex().getName()).append(']').append('[').append(request.shardId().id()).append("] "); - sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(recoveryTime).append("]\n"); - sb.append(" phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]").append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]") - .append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [").append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']') - .append("\n"); - sb.append(" : reusing_files [").append(recoveryResponse.phase1ExistingFileNames.size()).append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n"); - sb.append(" phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n"); - sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]").append(" transaction log operations") - .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]") - .append("\n"); - logger.trace(sb.toString()); - } else { - logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryStatus.sourceNode(), recoveryTime); - } - } catch (CancellableThreads.ExecutionCancelledException e) { - logger.trace("recovery cancelled", e); - } catch (Throwable e) { - if (logger.isTraceEnabled()) { - logger.trace("[{}][{}] Got exception on recovery", e, request.shardId().getIndex().getName(), request.shardId().id()); - } - Throwable cause = ExceptionsHelper.unwrapCause(e); - if (cause instanceof CancellableThreads.ExecutionCancelledException) { - // this can also come from the source wrapped in a RemoteTransportException - onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source 
has canceled the recovery", cause), false); - return; - } - if (cause instanceof RecoveryEngineException) { - // unwrap an exception that was thrown as part of the recovery - cause = cause.getCause(); - } - // do it twice, in case we have double transport exception - cause = ExceptionsHelper.unwrapCause(cause); - if (cause instanceof RecoveryEngineException) { - // unwrap an exception that was thrown as part of the recovery - cause = cause.getCause(); - } - - // here, we would add checks against exception that need to be retried (and not removeAndClean in this case) - - if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException || cause instanceof ShardNotFoundException) { - // if the target is not ready yet, retry - retryRecovery(recoveryStatus, "remote shard not ready", recoverySettings.retryDelayStateSync(), request); - return; - } - - if (cause instanceof DelayRecoveryException) { - retryRecovery(recoveryStatus, cause, recoverySettings.retryDelayStateSync(), request); - return; - } - - if (cause instanceof ConnectTransportException) { - logger.debug("delaying recovery of {} for [{}] due to networking error [{}]", recoveryStatus.shardId(), recoverySettings.retryDelayNetwork(), cause.getMessage()); - retryRecovery(recoveryStatus, cause.getMessage(), recoverySettings.retryDelayNetwork(), request); - return; - } - - if (cause instanceof IndexShardClosedException) { - onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source shard is closed", cause), false); - return; - } - - if (cause instanceof AlreadyClosedException) { - onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source shard is closed", cause), false); - return; - } - onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, e), true); - } - } - - public interface RecoveryListener { - void onRecoveryDone(RecoveryState state); - - void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure); - } - - class PrepareForTranslogOperationsRequestHandler implements TransportRequestHandler { - - @Override - public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { - final RecoveryStatus recoveryStatus = statusRef.status(); - recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps()); - recoveryStatus.indexShard().skipTranslogRecovery(); - } - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } - - class FinalizeRecoveryRequestHandler implements TransportRequestHandler { - - @Override - public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { - final RecoveryStatus recoveryStatus = statusRef.status(); - recoveryStatus.indexShard().finalizeRecovery(); - } - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } - - class TranslogOperationsRequestHandler implements TransportRequestHandler { - - @Override - public void messageReceived(final RecoveryTranslogOperationsRequest request, final TransportChannel channel) throws Exception { - try (RecoveriesCollection.StatusRef statusRef = 
onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { - final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); - final RecoveryStatus recoveryStatus = statusRef.status(); - final RecoveryState.Translog translog = recoveryStatus.state().getTranslog(); - translog.totalOperations(request.totalTranslogOps()); - assert recoveryStatus.indexShard().recoveryState() == recoveryStatus.state(); - try { - recoveryStatus.indexShard().performBatchRecovery(request.operations()); - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } catch (TranslogRecoveryPerformer.BatchOperationException exception) { - MapperException mapperException = (MapperException) ExceptionsHelper.unwrap(exception, MapperException.class); - if (mapperException == null) { - throw exception; - } - // in very rare cases a translog replay from primary is processed before a mapping update on this node - // which causes local mapping changes. we want to wait until these mappings are processed. - logger.trace("delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)", exception, exception.completedOperations()); - translog.decrementRecoveredOperations(exception.completedOperations()); - // we do not need to use a timeout here since the entire recovery mechanism has an inactivity protection (it will be - // canceled) - observer.waitForNextChange(new ClusterStateObserver.Listener() { - @Override - public void onNewClusterState(ClusterState state) { - try { - messageReceived(request, channel); - } catch (Exception e) { - onFailure(e); - } - } - - protected void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (IOException e1) { - logger.warn("failed to send error back to recovery source", e1); - } - } - - @Override - public void onClusterServiceClose() { - onFailure(new ElasticsearchException("cluster service was closed while waiting for mapping updates")); - } - - @Override - public void onTimeout(TimeValue timeout) { - // note that we do not use a timeout (see comment above) - onFailure(new ElasticsearchTimeoutException("timed out waiting for mapping updates (timeout [" + timeout + "])")); - } - }); - } - } - } - } - - class FilesInfoRequestHandler implements TransportRequestHandler { - - @Override - public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { - final RecoveryStatus recoveryStatus = statusRef.status(); - final RecoveryState.Index index = recoveryStatus.state().getIndex(); - for (int i = 0; i < request.phase1ExistingFileNames.size(); i++) { - index.addFileDetail(request.phase1ExistingFileNames.get(i), request.phase1ExistingFileSizes.get(i), true); - } - for (int i = 0; i < request.phase1FileNames.size(); i++) { - index.addFileDetail(request.phase1FileNames.get(i), request.phase1FileSizes.get(i), false); - } - recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps); - recoveryStatus.state().getTranslog().totalOperationsOnStart(request.totalTranslogOps); - // recoveryBytesCount / recoveryFileCount will be set as we go... 
- channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } - } - - class CleanFilesRequestHandler implements TransportRequestHandler { - - @Override - public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { - final RecoveryStatus recoveryStatus = statusRef.status(); - recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps()); - // first, we go and move files that were created with the recovery id suffix to - // the actual names, its ok if we have a corrupted index here, since we have replicas - // to recover from in case of a full cluster shutdown just when this code executes... - recoveryStatus.indexShard().deleteShardState(); // we have to delete it first since even if we fail to rename the shard might be invalid - recoveryStatus.renameAllTempFiles(); - final Store store = recoveryStatus.store(); - // now write checksums - recoveryStatus.legacyChecksums().write(store); - Store.MetadataSnapshot sourceMetaData = request.sourceMetaSnapshot(); - try { - store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData); - } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { - // this is a fatal exception at this stage. - // this means we transferred files from the remote that have not be checksummed and they are - // broken. We have to clean up this shard entirely, remove all files and bubble it up to the - // source shard since this index might be broken there as well? The Source can handle this and checks - // its content on disk if possible. - try { - try { - store.removeCorruptionMarker(); - } finally { - Lucene.cleanLuceneIndex(store.directory()); // clean up and delete all files - } - } catch (Throwable e) { - logger.debug("Failed to clean lucene index", e); - ex.addSuppressed(e); - } - RecoveryFailedException rfe = new RecoveryFailedException(recoveryStatus.state(), "failed to clean after recovery", ex); - recoveryStatus.fail(rfe, true); - throw rfe; - } catch (Exception ex) { - RecoveryFailedException rfe = new RecoveryFailedException(recoveryStatus.state(), "failed to clean after recovery", ex); - recoveryStatus.fail(rfe, true); - throw rfe; - } - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } - } - - class FileChunkTransportRequestHandler implements TransportRequestHandler { - - // How many bytes we've copied since we last called RateLimiter.pause - final AtomicLong bytesSinceLastPause = new AtomicLong(); - - @Override - public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { - final RecoveryStatus recoveryStatus = statusRef.status(); - final Store store = recoveryStatus.store(); - recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps()); - final RecoveryState.Index indexState = recoveryStatus.state().getIndex(); - if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) { - indexState.addSourceThrottling(request.sourceThrottleTimeInNanos()); - } - IndexOutput indexOutput; - if (request.position() == 0) { - indexOutput = recoveryStatus.openAndPutIndexOutput(request.name(), request.metadata(), store); - } else { - indexOutput = recoveryStatus.getOpenIndexOutput(request.name()); - } - 
BytesReference content = request.content(); - if (!content.hasArray()) { - content = content.toBytesArray(); - } - RateLimiter rl = recoverySettings.rateLimiter(); - if (rl != null) { - long bytes = bytesSinceLastPause.addAndGet(content.length()); - if (bytes > rl.getMinPauseCheckBytes()) { - // Time to pause - bytesSinceLastPause.addAndGet(-bytes); - long throttleTimeInNanos = rl.pause(bytes); - indexState.addTargetThrottling(throttleTimeInNanos); - recoveryStatus.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos); - } - } - indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length()); - indexState.addRecoveredBytesToFile(request.name(), content.length()); - if (indexOutput.getFilePointer() >= request.length() || request.lastChunk()) { - try { - Store.verify(indexOutput); - } finally { - // we are done - indexOutput.close(); - } - // write the checksum - recoveryStatus.legacyChecksums().add(request.metadata()); - final String temporaryFileName = recoveryStatus.getTempNameForFile(request.name()); - assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName); - store.directory().sync(Collections.singleton(temporaryFileName)); - IndexOutput remove = recoveryStatus.removeOpenIndexOutputs(request.name()); - assert remove == null || remove == indexOutput; // remove maybe null if we got finished - } - } - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } - - class RecoveryRunner extends AbstractRunnable { - - final long recoveryId; - - RecoveryRunner(long recoveryId) { - this.recoveryId = recoveryId; - } - - @Override - public void onFailure(Throwable t) { - try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatus(recoveryId)) { - if (statusRef != null) { - logger.error("unexpected error during recovery [{}], failing shard", t, recoveryId); - onGoingRecoveries.failRecovery(recoveryId, - new RecoveryFailedException(statusRef.status().state(), "unexpected error", t), - true // be safe - ); - } else { - logger.debug("unexpected error during recovery, but recovery id [{}] is finished", t, recoveryId); - } - } - } - - @Override - public void doRun() { - RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatus(recoveryId); - if (statusRef == null) { - logger.trace("not running recovery with id [{}] - can't find it (probably finished)", recoveryId); - return; - } + public void fail(RecoveryFailedException e, boolean sendShardFailure) { + if (finished.compareAndSet(false, true)) { try { - doRecovery(statusRef.status()); + listener.onRecoveryFailure(state(), e, sendShardFailure); } finally { - statusRef.close(); + try { + cancellableThreads.cancel("failed recovery [" + ExceptionsHelper.stackTrace(e) + "]"); + } finally { + // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now + decRef(); + } } } } + /** mark the current recovery as done */ + public void markAsDone() { + if (finished.compareAndSet(false, true)) { + assert tempFileNames.isEmpty() : "not all temporary files are renamed"; + try { + // this might still throw an exception ie. if the shard is CLOSED due to some other event. + // it's safer to decrement the reference in a try finally here. + indexShard.postRecovery("peer recovery done"); + } finally { + // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now + decRef(); + } + listener.onRecoveryDone(state()); + } + } + + /** Get a temporary name for the provided file name. 
*/
+    public String getTempNameForFile(String origFile) {
+        return tempFilePrefix + origFile;
+    }
+
+    public IndexOutput getOpenIndexOutput(String key) {
+        ensureRefCount();
+        return openIndexOutputs.get(key);
+    }
+
+    /** remove an {@link org.apache.lucene.store.IndexOutput} for a given file. It is the caller's responsibility to close it */
+    public IndexOutput removeOpenIndexOutputs(String name) {
+        ensureRefCount();
+        return openIndexOutputs.remove(name);
+    }
+
+    /**
+     * Creates an {@link org.apache.lucene.store.IndexOutput} for the given file name. Note that the
+     * IndexOutput actually points at a temporary file.
+     *

+ * Note: You can use {@link #getOpenIndexOutput(String)} with the same filename to retrieve the same IndexOutput + * at a later stage + */ + public IndexOutput openAndPutIndexOutput(String fileName, StoreFileMetaData metaData, Store store) throws IOException { + ensureRefCount(); + String tempFileName = getTempNameForFile(fileName); + if (tempFileNames.containsKey(tempFileName)) { + throw new IllegalStateException("output for file [" + fileName + "] has already been created"); + } + // add first, before it's created + tempFileNames.put(tempFileName, fileName); + IndexOutput indexOutput = store.createVerifyingOutput(tempFileName, metaData, IOContext.DEFAULT); + openIndexOutputs.put(fileName, indexOutput); + return indexOutput; + } + + public void resetRecovery() throws IOException { + cleanOpenFiles(); + indexShard().performRecoveryRestart(); + } + + @Override + protected void closeInternal() { + try { + cleanOpenFiles(); + } finally { + // free store. increment happens in constructor + store.decRef(); + indexShard.recoveryStats().decCurrentAsTarget(); + } + } + + protected void cleanOpenFiles() { + // clean open index outputs + Iterator> iterator = openIndexOutputs.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry entry = iterator.next(); + logger.trace("closing IndexOutput file [{}]", entry.getValue()); + try { + entry.getValue().close(); + } catch (Throwable t) { + logger.debug("error while closing recovery output [{}]", t, entry.getValue()); + } + iterator.remove(); + } + // trash temporary files + for (String file : tempFileNames.keySet()) { + logger.trace("cleaning temporary file [{}]", file); + store.deleteQuiet(file); + } + legacyChecksums.clear(); + } + + @Override + public String toString() { + return shardId + " [" + recoveryId + "]"; + } + + private void ensureRefCount() { + if (refCount() <= 0) { + throw new ElasticsearchException("RecoveryStatus is used but it's refcount is 0. 
Probably a mismatch between incRef/decRef " +
+            "calls");
+        }
+    }
+
+    /** Implementation of {@link RecoveryTargetHandler} */
+
+    @Override
+    public void prepareForTranslogOperations(int totalTranslogOps) throws IOException {
+        state().getTranslog().totalOperations(totalTranslogOps);
+        indexShard().skipTranslogRecovery();
+    }
+
+    @Override
+    public void finalizeRecovery() {
+        indexShard().finalizeRecovery();
+    }
+
+    @Override
+    public void indexTranslogOperations(List operations, int totalTranslogOps) throws TranslogRecoveryPerformer
+        .BatchOperationException {
+        final RecoveryState.Translog translog = state().getTranslog();
+        translog.totalOperations(totalTranslogOps);
+        assert indexShard().recoveryState() == state();
+        indexShard().performBatchRecovery(operations);
+    }
+
+    @Override
+    public void receiveFileInfo(List phase1FileNames,
+                                List phase1FileSizes,
+                                List phase1ExistingFileNames,
+                                List phase1ExistingFileSizes,
+                                int totalTranslogOps) {
+        final RecoveryState.Index index = state().getIndex();
+        for (int i = 0; i < phase1ExistingFileNames.size(); i++) {
+            index.addFileDetail(phase1ExistingFileNames.get(i), phase1ExistingFileSizes.get(i), true);
+        }
+        for (int i = 0; i < phase1FileNames.size(); i++) {
+            index.addFileDetail(phase1FileNames.get(i), phase1FileSizes.get(i), false);
+        }
+        state().getTranslog().totalOperations(totalTranslogOps);
+        state().getTranslog().totalOperationsOnStart(totalTranslogOps);
+
+    }
+
+    @Override
+    public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException {
+        state().getTranslog().totalOperations(totalTranslogOps);
+        // first, we go and move files that were created with the recovery id suffix to
+        // the actual names, it's ok if we have a corrupted index here, since we have replicas
+        // to recover from in case of a full cluster shutdown just when this code executes...
+        indexShard().deleteShardState(); // we have to delete it first since even if we fail to rename the shard
+        // might be invalid
+        renameAllTempFiles();
+        final Store store = store();
+        // now write checksums
+        legacyChecksums().write(store);
+        try {
+            store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
+        } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
+            // this is a fatal exception at this stage.
+            // this means we transferred files from the remote that have not been checksummed and they are
+            // broken. We have to clean up this shard entirely, remove all files and bubble it up to the
+            // source shard since this index might be broken there as well? The Source can handle this and checks
+            // its content on disk if possible.
+ try { + try { + store.removeCorruptionMarker(); + } finally { + Lucene.cleanLuceneIndex(store.directory()); // clean up and delete all files + } + } catch (Throwable e) { + logger.debug("Failed to clean lucene index", e); + ex.addSuppressed(e); + } + RecoveryFailedException rfe = new RecoveryFailedException(state(), "failed to clean after recovery", ex); + fail(rfe, true); + throw rfe; + } catch (Exception ex) { + RecoveryFailedException rfe = new RecoveryFailedException(state(), "failed to clean after recovery", ex); + fail(rfe, true); + throw rfe; + } + } + + @Override + public void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content, + boolean lastChunk, int totalTranslogOps) throws IOException { + final Store store = store(); + final String name = fileMetaData.name(); + state().getTranslog().totalOperations(totalTranslogOps); + final RecoveryState.Index indexState = state().getIndex(); + IndexOutput indexOutput; + if (position == 0) { + indexOutput = openAndPutIndexOutput(name, fileMetaData, store); + } else { + indexOutput = getOpenIndexOutput(name); + } + if (content.hasArray() == false) { + content = content.toBytesArray(); + } + indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length()); + indexState.addRecoveredBytesToFile(name, content.length()); + if (indexOutput.getFilePointer() >= fileMetaData.length() || lastChunk) { + try { + Store.verify(indexOutput); + } finally { + // we are done + indexOutput.close(); + } + // write the checksum + legacyChecksums().add(fileMetaData); + final String temporaryFileName = getTempNameForFile(name); + assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName); + store.directory().sync(Collections.singleton(temporaryFileName)); + IndexOutput remove = removeOpenIndexOutputs(name); + assert remove == null || remove == indexOutput; // remove maybe null if we got finished + } + } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java new file mode 100644 index 00000000000..4772e2d0a8b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.indices.recovery;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.StoreFileMetaData;
+import org.elasticsearch.index.translog.Translog;
+
+import java.io.IOException;
+import java.util.List;
+
+
+public interface RecoveryTargetHandler {
+
+    /**
+     * Prepares the target to receive translog operations, after all files have been copied
+     *
+     * @param totalTranslogOps total translog operations expected to be sent
+     */
+    void prepareForTranslogOperations(int totalTranslogOps) throws IOException;
+
+    /**
+     * The finalize request clears unreferenced translog files, refreshes the engine now that
+     * new segments are available, and enables garbage collection of
+     * tombstone files. The shard is also moved to the POST_RECOVERY phase during this time
+     **/
+    void finalizeRecovery();
+
+    /**
+     * Index a set of translog operations on the target
+     * @param operations operations to index
+     * @param totalTranslogOps current number of total operations expected to be indexed
+     */
+    void indexTranslogOperations(List operations, int totalTranslogOps);
+
+    /**
+     * Notifies the target of the files it is going to receive
+     */
+    void receiveFileInfo(List phase1FileNames,
+                         List phase1FileSizes,
+                         List phase1ExistingFileNames,
+                         List phase1ExistingFileSizes,
+                         int totalTranslogOps);
+
+    /**
+     * After all source files have been sent over, this command is sent to the target so it can clean any local
+     * files that are not part of the source store
+     * @param totalTranslogOps an updated number of translog operations that will be replayed later on
+     * @param sourceMetaData meta data of the source store
+     */
+    void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException;
+
+    /** writes a partial file chunk to the target store */
+    void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content,
+                        boolean lastChunk, int totalTranslogOps) throws IOException;
+
+}
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java
new file mode 100644
index 00000000000..dcbb0c7bedf
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java
@@ -0,0 +1,470 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.indices.recovery; + +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.RateLimiter; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.CancellableThreads; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.engine.RecoveryEngineException; +import org.elasticsearch.index.mapper.MapperException; +import org.elasticsearch.index.shard.IllegalIndexShardStateException; +import org.elasticsearch.index.shard.IndexEventListener; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardClosedException; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.shard.TranslogRecoveryPerformer; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.FutureTransportResponseHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; + +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; + +/** + * The recovery target handles recoveries of peer shards of the shard+node to recover to. + *

+ * Note, it can be safely assumed that there will only be a single recovery per shard (index+id) and + * not several of them (since we don't allocate several shard replicas to the same node). + */ +public class RecoveryTargetService extends AbstractComponent implements IndexEventListener { + + public static class Actions { + public static final String FILES_INFO = "internal:index/shard/recovery/filesInfo"; + public static final String FILE_CHUNK = "internal:index/shard/recovery/file_chunk"; + public static final String CLEAN_FILES = "internal:index/shard/recovery/clean_files"; + public static final String TRANSLOG_OPS = "internal:index/shard/recovery/translog_ops"; + public static final String PREPARE_TRANSLOG = "internal:index/shard/recovery/prepare_translog"; + public static final String FINALIZE = "internal:index/shard/recovery/finalize"; + } + + private final ThreadPool threadPool; + + private final TransportService transportService; + + private final RecoverySettings recoverySettings; + private final ClusterService clusterService; + + private final RecoveriesCollection onGoingRecoveries; + + @Inject + public RecoveryTargetService(Settings settings, ThreadPool threadPool, TransportService transportService, RecoverySettings + recoverySettings, + ClusterService clusterService) { + super(settings); + this.threadPool = threadPool; + this.transportService = transportService; + this.recoverySettings = recoverySettings; + this.clusterService = clusterService; + this.onGoingRecoveries = new RecoveriesCollection(logger, threadPool); + + transportService.registerRequestHandler(Actions.FILES_INFO, RecoveryFilesInfoRequest::new, ThreadPool.Names.GENERIC, new + FilesInfoRequestHandler()); + transportService.registerRequestHandler(Actions.FILE_CHUNK, RecoveryFileChunkRequest::new, ThreadPool.Names.GENERIC, new + FileChunkTransportRequestHandler()); + transportService.registerRequestHandler(Actions.CLEAN_FILES, RecoveryCleanFilesRequest::new, ThreadPool.Names.GENERIC, new + CleanFilesRequestHandler()); + transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, RecoveryPrepareForTranslogOperationsRequest::new, ThreadPool + .Names.GENERIC, new PrepareForTranslogOperationsRequestHandler()); + transportService.registerRequestHandler(Actions.TRANSLOG_OPS, RecoveryTranslogOperationsRequest::new, ThreadPool.Names.GENERIC, + new TranslogOperationsRequestHandler()); + transportService.registerRequestHandler(Actions.FINALIZE, RecoveryFinalizeRecoveryRequest::new, ThreadPool.Names.GENERIC, new + FinalizeRecoveryRequestHandler()); + } + + @Override + public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { + if (indexShard != null) { + onGoingRecoveries.cancelRecoveriesForShard(shardId, "shard closed"); + } + } + + /** + * cancel all ongoing recoveries for the given shard, if their status match a predicate + * + * @param reason reason for cancellation + * @param shardId shardId for which to cancel recoveries + * @param shouldCancel a predicate to check if a recovery should be cancelled or not. Null means cancel without an extra check. + * note that the recovery state can change after this check, but before it is being cancelled via other + * already issued outstanding references. 
+ * @return true if a recovery was cancelled + */ + public boolean cancelRecoveriesForShard(ShardId shardId, String reason, @Nullable Predicate shouldCancel) { + return onGoingRecoveries.cancelRecoveriesForShard(shardId, reason, shouldCancel); + } + + public void startRecovery(final IndexShard indexShard, final RecoveryState.Type recoveryType, final DiscoveryNode sourceNode, final + RecoveryListener listener) { + // create a new recovery status, and process... + final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout()); + threadPool.generic().execute(new RecoveryRunner(recoveryId)); + } + + protected void retryRecovery(final RecoveryTarget recoveryTarget, final Throwable reason, TimeValue retryAfter, final + StartRecoveryRequest currentRequest) { + logger.trace("will retry recovery with id [{}] in [{}]", reason, recoveryTarget.recoveryId(), retryAfter); + retryRecovery(recoveryTarget, retryAfter, currentRequest); + } + + protected void retryRecovery(final RecoveryTarget recoveryTarget, final String reason, TimeValue retryAfter, final + StartRecoveryRequest currentRequest) { + logger.trace("will retry recovery with id [{}] in [{}] (reason [{}])", recoveryTarget.recoveryId(), retryAfter, reason); + retryRecovery(recoveryTarget, retryAfter, currentRequest); + } + + private void retryRecovery(final RecoveryTarget recoveryTarget, TimeValue retryAfter, final StartRecoveryRequest currentRequest) { + try { + recoveryTarget.resetRecovery(); + } catch (Throwable e) { + onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(currentRequest, e), true); + } + threadPool.schedule(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(recoveryTarget.recoveryId())); + } + + private void doRecovery(final RecoveryTarget recoveryTarget) { + assert recoveryTarget.sourceNode() != null : "can't do a recovery without a source node"; + + logger.trace("collecting local files for {}", recoveryTarget); + Store.MetadataSnapshot metadataSnapshot = null; + try { + metadataSnapshot = recoveryTarget.store().getMetadataOrEmpty(); + } catch (IOException e) { + logger.warn("error while listing local files, recover as if there are none", e); + metadataSnapshot = Store.MetadataSnapshot.EMPTY; + } catch (Exception e) { + // this will be logged as warning later on... 
+ logger.trace("unexpected error while listing local files, failing recovery", e); + onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), + new RecoveryFailedException(recoveryTarget.state(), "failed to list local files", e), true); + return; + } + final StartRecoveryRequest request = new StartRecoveryRequest(recoveryTarget.shardId(), recoveryTarget.sourceNode(), + clusterService.localNode(), + metadataSnapshot, recoveryTarget.state().getType(), recoveryTarget.recoveryId()); + + final AtomicReference responseHolder = new AtomicReference<>(); + try { + logger.trace("[{}][{}] starting recovery from {}", request.shardId().getIndex().getName(), request.shardId().id(), request + .sourceNode()); + recoveryTarget.indexShard().prepareForIndexRecovery(); + recoveryTarget.CancellableThreads().execute(() -> responseHolder.set( + transportService.submitRequest(request.sourceNode(), RecoverySource.Actions.START_RECOVERY, request, + new FutureTransportResponseHandler() { + @Override + public RecoveryResponse newInstance() { + return new RecoveryResponse(); + } + }).txGet())); + final RecoveryResponse recoveryResponse = responseHolder.get(); + assert responseHolder != null; + final TimeValue recoveryTime = new TimeValue(recoveryTarget.state().getTimer().time()); + // do this through ongoing recoveries to remove it from the collection + onGoingRecoveries.markRecoveryAsDone(recoveryTarget.recoveryId()); + if (logger.isTraceEnabled()) { + StringBuilder sb = new StringBuilder(); + sb.append('[').append(request.shardId().getIndex().getName()).append(']').append('[').append(request.shardId().id()) + .append("] "); + sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(recoveryTime).append("]\n"); + sb.append(" phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]").append(" with " + + "total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]") + .append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [").append + (timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']') + .append("\n"); + sb.append(" : reusing_files [").append(recoveryResponse.phase1ExistingFileNames.size()).append("] with " + + "total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n"); + sb.append(" phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n"); + sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]").append(" transaction log " + + "operations") + .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]") + .append("\n"); + logger.trace(sb.toString()); + } else { + logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryTarget.sourceNode(), recoveryTime); + } + } catch (CancellableThreads.ExecutionCancelledException e) { + logger.trace("recovery cancelled", e); + } catch (Throwable e) { + if (logger.isTraceEnabled()) { + logger.trace("[{}][{}] Got exception on recovery", e, request.shardId().getIndex().getName(), request.shardId().id()); + } + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof CancellableThreads.ExecutionCancelledException) { + // this can also come from the source wrapped in a RemoteTransportException + onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(request, "source has canceled the" + + " recovery", cause), false); + return; + } + if 
(cause instanceof RecoveryEngineException) { + // unwrap an exception that was thrown as part of the recovery + cause = cause.getCause(); + } + // do it twice, in case we have double transport exception + cause = ExceptionsHelper.unwrapCause(cause); + if (cause instanceof RecoveryEngineException) { + // unwrap an exception that was thrown as part of the recovery + cause = cause.getCause(); + } + + // here, we would add checks against exception that need to be retried (and not removeAndClean in this case) + + if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException || cause instanceof + ShardNotFoundException) { + // if the target is not ready yet, retry + retryRecovery(recoveryTarget, "remote shard not ready", recoverySettings.retryDelayStateSync(), request); + return; + } + + if (cause instanceof DelayRecoveryException) { + retryRecovery(recoveryTarget, cause, recoverySettings.retryDelayStateSync(), request); + return; + } + + if (cause instanceof ConnectTransportException) { + logger.debug("delaying recovery of {} for [{}] due to networking error [{}]", recoveryTarget.shardId(), recoverySettings + .retryDelayNetwork(), cause.getMessage()); + retryRecovery(recoveryTarget, cause.getMessage(), recoverySettings.retryDelayNetwork(), request); + return; + } + + if (cause instanceof IndexShardClosedException) { + onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(request, "source shard is " + + "closed", cause), false); + return; + } + + if (cause instanceof AlreadyClosedException) { + onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(request, "source shard is " + + "closed", cause), false); + return; + } + onGoingRecoveries.failRecovery(recoveryTarget.recoveryId(), new RecoveryFailedException(request, e), true); + } + } + + public interface RecoveryListener { + void onRecoveryDone(RecoveryState state); + + void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure); + } + + class PrepareForTranslogOperationsRequestHandler implements TransportRequestHandler { + + @Override + public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception { + try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() + )) { + recoveryRef.status().prepareForTranslogOperations(request.totalTranslogOps()); + } + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } + } + + class FinalizeRecoveryRequestHandler implements TransportRequestHandler { + + @Override + public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel) throws Exception { + try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() + )) { + recoveryRef.status().finalizeRecovery(); + } + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } + } + + class TranslogOperationsRequestHandler implements TransportRequestHandler { + + @Override + public void messageReceived(final RecoveryTranslogOperationsRequest request, final TransportChannel channel) throws IOException { + try (RecoveriesCollection.RecoveryRef recoveryRef = + onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { + final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); + final RecoveryTarget recoveryTarget = 
recoveryRef.status(); + try { + recoveryTarget.indexTranslogOperations(request.operations(), request.totalTranslogOps()); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } catch (TranslogRecoveryPerformer.BatchOperationException exception) { + MapperException mapperException = (MapperException) ExceptionsHelper.unwrap(exception, MapperException.class); + if (mapperException == null) { + throw exception; + } + // in very rare cases a translog replay from primary is processed before a mapping update on this node + // which causes local mapping changes. we want to wait until these mappings are processed. + logger.trace("delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)", exception, exception + .completedOperations()); + // we do not need to use a timeout here since the entire recovery mechanism has an inactivity protection (it will be + // canceled) + observer.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + try { + messageReceived(request, channel); + } catch (Exception e) { + onFailure(e); + } + } + + protected void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (IOException e1) { + logger.warn("failed to send error back to recovery source", e1); + } + } + + @Override + public void onClusterServiceClose() { + onFailure(new ElasticsearchException("cluster service was closed while waiting for mapping updates")); + } + + @Override + public void onTimeout(TimeValue timeout) { + // note that we do not use a timeout (see comment above) + onFailure(new ElasticsearchTimeoutException("timed out waiting for mapping updates (timeout [" + timeout + + "])")); + } + }); + } + } + } + } + + class FilesInfoRequestHandler implements TransportRequestHandler { + + @Override + public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel) throws Exception { + try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() + )) { + recoveryRef.status().receiveFileInfo(request.phase1FileNames, request.phase1FileSizes, request.phase1ExistingFileNames, + request.phase1ExistingFileSizes, request.totalTranslogOps); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } + } + } + + class CleanFilesRequestHandler implements TransportRequestHandler { + + @Override + public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception { + try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() + )) { + recoveryRef.status().cleanFiles(request.totalTranslogOps(), request.sourceMetaSnapshot()); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } + } + } + + class FileChunkTransportRequestHandler implements TransportRequestHandler { + + // How many bytes we've copied since we last called RateLimiter.pause + final AtomicLong bytesSinceLastPause = new AtomicLong(); + + @Override + public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception { + try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() + )) { + final RecoveryTarget status = recoveryRef.status(); + final RecoveryState.Index indexState = status.state().getIndex(); + if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) { + 
indexState.addSourceThrottling(request.sourceThrottleTimeInNanos()); + } + + RateLimiter rateLimiter = recoverySettings.rateLimiter(); + if (rateLimiter != null) { + long bytes = bytesSinceLastPause.addAndGet(request.content().length()); + if (bytes > rateLimiter.getMinPauseCheckBytes()) { + // Time to pause + bytesSinceLastPause.addAndGet(-bytes); + long throttleTimeInNanos = rateLimiter.pause(bytes); + indexState.addTargetThrottling(throttleTimeInNanos); + status.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos); + } + } + + status.writeFileChunk(request.metadata(), request.position(), request.content(), + request.lastChunk(), request.totalTranslogOps() + ); + } + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } + } + + class RecoveryRunner extends AbstractRunnable { + + final long recoveryId; + + RecoveryRunner(long recoveryId) { + this.recoveryId = recoveryId; + } + + @Override + public void onFailure(Throwable t) { + try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { + if (recoveryRef != null) { + logger.error("unexpected error during recovery [{}], failing shard", t, recoveryId); + onGoingRecoveries.failRecovery(recoveryId, + new RecoveryFailedException(recoveryRef.status().state(), "unexpected error", t), + true // be safe + ); + } else { + logger.debug("unexpected error during recovery, but recovery id [{}] is finished", t, recoveryId); + } + } + } + + @Override + public void doRun() { + RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId); + if (recoveryRef == null) { + logger.trace("not running recovery with id [{}] - can't find it (probably finished)", recoveryId); + return; + } + try { + doRecovery(recoveryRef.status()); + } finally { + recoveryRef.close(); + } + } + } + +} diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java new file mode 100644 index 00000000000..edc6c520be0 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.indices.recovery; + +import org.apache.lucene.store.RateLimiter; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.store.StoreFileMetaData; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.transport.EmptyTransportResponseHandler; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; + +public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { + private final TransportService transportService; + private final long recoveryId; + private final ShardId shardId; + private final DiscoveryNode targetNode; + private final RecoverySettings recoverySettings; + + private final TransportRequestOptions translogOpsRequestOptions; + private final TransportRequestOptions fileChunkRequestOptions; + + private final AtomicLong bytesSinceLastPause = new AtomicLong(); + + private final Consumer onSourceThrottle; + + public RemoteRecoveryTargetHandler(long recoveryId, ShardId shardId, TransportService transportService, DiscoveryNode targetNode, + RecoverySettings recoverySettings, Consumer onSourceThrottle) { + this.transportService = transportService; + + + this.recoveryId = recoveryId; + this.shardId = shardId; + this.targetNode = targetNode; + this.recoverySettings = recoverySettings; + this.onSourceThrottle = onSourceThrottle; + this.translogOpsRequestOptions = TransportRequestOptions.builder() + .withCompress(true) + .withType(TransportRequestOptions.Type.RECOVERY) + .withTimeout(recoverySettings.internalActionLongTimeout()) + .build(); + this.fileChunkRequestOptions = TransportRequestOptions.builder() + .withCompress(false) // lucene files are already compressed and therefore compressing this won't really help much so + // we are saving the cpu for other things + .withType(TransportRequestOptions.Type.RECOVERY) + .withTimeout(recoverySettings.internalActionTimeout()) + .build(); + + } + + @Override + public void prepareForTranslogOperations(int totalTranslogOps) throws IOException { + transportService.submitRequest(targetNode, RecoveryTargetService.Actions.PREPARE_TRANSLOG, + new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps), + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), + EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + } + + @Override + public void finalizeRecovery() { + transportService.submitRequest(targetNode, RecoveryTargetService.Actions.FINALIZE, + new RecoveryFinalizeRecoveryRequest(recoveryId, shardId), + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(), + EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + } + + @Override + public void indexTranslogOperations(List operations, int totalTranslogOps) { + final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest( + recoveryId, shardId, operations, totalTranslogOps); + transportService.submitRequest(targetNode, RecoveryTargetService.Actions.TRANSLOG_OPS, translogOperationsRequest, + translogOpsRequestOptions, 
EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + } + + @Override + public void receiveFileInfo(List phase1FileNames, List phase1FileSizes, List phase1ExistingFileNames, + List phase1ExistingFileSizes, int totalTranslogOps) { + + RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(recoveryId, shardId, + phase1FileNames, phase1FileSizes, phase1ExistingFileNames, phase1ExistingFileSizes, totalTranslogOps); + transportService.submitRequest(targetNode, RecoveryTargetService.Actions.FILES_INFO, recoveryInfoFilesRequest, + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), + EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + + } + + @Override + public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException { + transportService.submitRequest(targetNode, RecoveryTargetService.Actions.CLEAN_FILES, + new RecoveryCleanFilesRequest(recoveryId, shardId, sourceMetaData, totalTranslogOps), + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), + EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + } + + @Override + public void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content, boolean + lastChunk, int totalTranslogOps) throws IOException { + // Pause using the rate limiter, if desired, to throttle the recovery + final long throttleTimeInNanos; + // always fetch the ratelimiter - it might be updated in real-time on the recovery settings + final RateLimiter rl = recoverySettings.rateLimiter(); + if (rl != null) { + long bytes = bytesSinceLastPause.addAndGet(content.length()); + if (bytes > rl.getMinPauseCheckBytes()) { + // Time to pause + bytesSinceLastPause.addAndGet(-bytes); + try { + throttleTimeInNanos = rl.pause(bytes); + onSourceThrottle.accept(throttleTimeInNanos); + } catch (IOException e) { + throw new ElasticsearchException("failed to pause recovery", e); + } + } else { + throttleTimeInNanos = 0; + } + } else { + throttleTimeInNanos = 0; + } + + transportService.submitRequest(targetNode, RecoveryTargetService.Actions.FILE_CHUNK, + new RecoveryFileChunkRequest(recoveryId, shardId, fileMetaData, position, content, lastChunk, + totalTranslogOps, + /* we send totalOperations with every request since we collect stats on the target and that way we can + * see how many translog ops we accumulate while copying files across the network. A future optimization + * would be in to restart file copy again (new deltas) if we have too many translog ops are piling up. 
+ */ + throttleTimeInNanos), fileChunkRequestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + } +} diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java index 8d75c474791..868b5dba9b9 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java @@ -22,7 +22,6 @@ package org.elasticsearch.indices.recovery; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -35,15 +34,16 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler { private final IndexShard shard; private final StartRecoveryRequest request; - public SharedFSRecoverySourceHandler(IndexShard shard, StartRecoveryRequest request, RecoverySettings recoverySettings, TransportService transportService, ESLogger logger) { - super(shard, request, recoverySettings, transportService, logger); + public SharedFSRecoverySourceHandler(IndexShard shard, RecoveryTargetHandler recoveryTarget, StartRecoveryRequest request, ESLogger + logger) { + super(shard, recoveryTarget, request, -1, logger); this.shard = shard; this.request = request; } @Override - public RecoveryResponse recoverToTarget() { - boolean engineClosed = false; + public RecoveryResponse recoverToTarget() throws IOException { + boolean engineClosed = false; try { logger.trace("{} recovery [phase1] to {}: skipping phase 1 for shared filesystem", request.shardId(), request.targetNode()); if (isPrimaryRelocation()) { @@ -83,5 +83,4 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler { shard.shardId(), request.targetNode()); return 0; } - } diff --git a/core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java b/core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java index 5c6a93254aa..a89cb48c37a 100644 --- a/core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java @@ -18,10 +18,12 @@ */ package org.elasticsearch.common.util; +import org.elasticsearch.common.util.CancellableThreads.IOInterruptable; import org.elasticsearch.common.util.CancellableThreads.Interruptable; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; +import java.io.IOException; import java.util.concurrent.CountDownLatch; public class CancellableThreadsTests extends ESTestCase { @@ -31,6 +33,13 @@ public class CancellableThreadsTests extends ESTestCase { } } + public static class IOCustomException extends IOException { + public IOCustomException(String msg) { + super(msg); + } + } + + private class TestPlan { public final int id; public final boolean busySpin; @@ -38,6 +47,8 @@ public class CancellableThreadsTests extends ESTestCase { public final boolean exitBeforeCancel; public final boolean exceptAfterCancel; public final boolean presetInterrupt; + public final boolean ioOp; + private final boolean ioException; private TestPlan(int id) { this.id = id; @@ -46,9 +57,77 @@ public class CancellableThreadsTests extends ESTestCase { this.exitBeforeCancel = randomBoolean(); this.exceptAfterCancel = randomBoolean(); this.presetInterrupt = 
randomBoolean(); + this.ioOp = randomBoolean(); + this.ioException = ioOp && randomBoolean(); } } + static class TestRunnable implements Interruptable { + final TestPlan plan; + final CountDownLatch readyForCancel; + + TestRunnable(TestPlan plan, CountDownLatch readyForCancel) { + this.plan = plan; + this.readyForCancel = readyForCancel; + } + + @Override + public void run() throws InterruptedException { + assertFalse("interrupt thread should have been clear", Thread.currentThread().isInterrupted()); + if (plan.exceptBeforeCancel) { + throw new CustomException("thread [" + plan.id + "] pre-cancel exception"); + } else if (plan.exitBeforeCancel) { + return; + } + readyForCancel.countDown(); + try { + if (plan.busySpin) { + while (!Thread.currentThread().isInterrupted()) { + } + } else { + Thread.sleep(50000); + } + } finally { + if (plan.exceptAfterCancel) { + throw new CustomException("thread [" + plan.id + "] post-cancel exception"); + } + } + } + } + + static class TestIORunnable implements IOInterruptable { + final TestPlan plan; + final CountDownLatch readyForCancel; + + TestIORunnable(TestPlan plan, CountDownLatch readyForCancel) { + this.plan = plan; + this.readyForCancel = readyForCancel; + } + + @Override + public void run() throws IOException, InterruptedException { + assertFalse("interrupt thread should have been clear", Thread.currentThread().isInterrupted()); + if (plan.exceptBeforeCancel) { + throw new IOCustomException("thread [" + plan.id + "] pre-cancel exception"); + } else if (plan.exitBeforeCancel) { + return; + } + readyForCancel.countDown(); + try { + if (plan.busySpin) { + while (!Thread.currentThread().isInterrupted()) { + } + } else { + Thread.sleep(50000); + } + } finally { + if (plan.exceptAfterCancel) { + throw new IOCustomException("thread [" + plan.id + "] post-cancel exception"); + } + } + + } + } public void testCancellableThreads() throws InterruptedException { Thread[] threads = new Thread[randomIntBetween(3, 10)]; @@ -60,47 +139,28 @@ public class CancellableThreadsTests extends ESTestCase { for (int i = 0; i < threads.length; i++) { final TestPlan plan = new TestPlan(i); plans[i] = plan; - threads[i] = new Thread(new Runnable() { - @Override - public void run() { - try { - if (plan.presetInterrupt) { - Thread.currentThread().interrupt(); + threads[i] = new Thread(() -> { + try { + if (plan.presetInterrupt) { + Thread.currentThread().interrupt(); + } + if (plan.ioOp) { + if (plan.ioException) { + cancellableThreads.executeIO(new TestIORunnable(plan, readyForCancel)); + } else { + cancellableThreads.executeIO(new TestRunnable(plan, readyForCancel)); } - cancellableThreads.execute(new Interruptable() { - @Override - public void run() throws InterruptedException { - assertFalse("interrupt thread should have been clear", Thread.currentThread().isInterrupted()); - if (plan.exceptBeforeCancel) { - throw new CustomException("thread [" + plan.id + "] pre-cancel exception"); - } else if (plan.exitBeforeCancel) { - return; - } - readyForCancel.countDown(); - try { - if (plan.busySpin) { - while (!Thread.currentThread().isInterrupted()) { - } - } else { - Thread.sleep(50000); - } - } finally { - if (plan.exceptAfterCancel) { - throw new CustomException("thread [" + plan.id + "] post-cancel exception"); - } - } - } - }); - } catch (Throwable t) { - throwables[plan.id] = t; + } else { + cancellableThreads.execute(new TestRunnable(plan, readyForCancel)); } - if (plan.exceptBeforeCancel || plan.exitBeforeCancel) { - // we have to mark we're ready now (actually done). 
- readyForCancel.countDown(); - } - interrupted[plan.id] = Thread.currentThread().isInterrupted(); - + } catch (Throwable t) { + throwables[plan.id] = t; } + if (plan.exceptBeforeCancel || plan.exitBeforeCancel) { + // we have to mark we're ready now (actually done). + readyForCancel.countDown(); + } + interrupted[plan.id] = Thread.currentThread().isInterrupted(); }); threads[i].setDaemon(true); threads[i].start(); @@ -114,8 +174,9 @@ public class CancellableThreadsTests extends ESTestCase { } for (int i = 0; i < threads.length; i++) { TestPlan plan = plans[i]; + final Class exceptionClass = plan.ioException ? IOCustomException.class : CustomException.class; if (plan.exceptBeforeCancel) { - assertThat(throwables[i], Matchers.instanceOf(CustomException.class)); + assertThat(throwables[i], Matchers.instanceOf(exceptionClass)); } else if (plan.exitBeforeCancel) { assertNull(throwables[i]); } else { @@ -124,7 +185,7 @@ public class CancellableThreadsTests extends ESTestCase { if (plan.exceptAfterCancel) { assertThat(throwables[i].getSuppressed(), Matchers.arrayContaining( - Matchers.instanceOf(CustomException.class) + Matchers.instanceOf(exceptionClass) )); } else { assertThat(throwables[i].getSuppressed(), Matchers.emptyArray()); diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java index d04f772ddba..a052b1d898c 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java +++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java @@ -41,7 +41,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShadowIndexShard; import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortOrder; @@ -485,7 +485,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { - if (keepFailing.get() && action.equals(RecoveryTarget.Actions.TRANSLOG_OPS)) { + if (keepFailing.get() && action.equals(RecoveryTargetService.Actions.TRANSLOG_OPS)) { logger.info("--> failing translog ops"); throw new ElasticsearchException("failing on purpose"); } diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 79f8a0cc666..4031aa5da25 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -50,13 +50,13 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; -import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.shard.ShardId; import 
org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; -import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -343,7 +343,7 @@ public class CorruptedFileIT extends ESIntegTestCase { @Override public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { - if (corrupt.get() && action.equals(RecoveryTarget.Actions.FILE_CHUNK)) { + if (corrupt.get() && action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; byte[] array = req.content().array(); int i = randomIntBetween(0, req.content().length() - 1); @@ -415,7 +415,7 @@ public class CorruptedFileIT extends ESIntegTestCase { @Override public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { - if (action.equals(RecoveryTarget.Actions.FILE_CHUNK)) { + if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; if (truncate && req.length() > 1) { BytesArray array = new BytesArray(req.content().array(), req.content().arrayOffset(), (int) req.length() - 1); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index cc11cb82057..2c51f3c9de4 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -38,7 +38,6 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.discovery.DiscoveryService; import org.elasticsearch.index.recovery.RecoveryStats; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState.Stage; @@ -568,12 +567,12 @@ public class IndexRecoveryIT extends ESIntegTestCase { String[] recoveryActions = new String[]{ RecoverySource.Actions.START_RECOVERY, - RecoveryTarget.Actions.FILES_INFO, - RecoveryTarget.Actions.FILE_CHUNK, - RecoveryTarget.Actions.CLEAN_FILES, + RecoveryTargetService.Actions.FILES_INFO, + RecoveryTargetService.Actions.FILE_CHUNK, + RecoveryTargetService.Actions.CLEAN_FILES, //RecoveryTarget.Actions.TRANSLOG_OPS, <-- may not be sent if already flushed - RecoveryTarget.Actions.PREPARE_TRANSLOG, - RecoveryTarget.Actions.FINALIZE + RecoveryTargetService.Actions.PREPARE_TRANSLOG, + RecoveryTargetService.Actions.FINALIZE }; final String recoveryActionToBlock = randomFrom(recoveryActions); final boolean dropRequests = randomBoolean(); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index b29404d59b6..b5f744ddc23 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import 
org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.DirectoryService; @@ -71,7 +70,8 @@ public class RecoverySourceHandlerTests extends ESTestCase { new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT), null, RecoveryState.Type.STORE, randomLong()); Store store = newStore(createTempDir()); - RecoverySourceHandler handler = new RecoverySourceHandler(null, request, recoverySettings, null, logger); + RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, recoverySettings.getChunkSize().bytesAsInt(), + logger); Directory dir = store.directory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig()); int numDocs = randomIntBetween(10, 100); @@ -122,7 +122,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { Path tempDir = createTempDir(); Store store = newStore(tempDir, false); AtomicBoolean failedEngine = new AtomicBoolean(false); - RecoverySourceHandler handler = new RecoverySourceHandler(null, request, recoverySettings, null, logger) { + RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, recoverySettings.getChunkSize().bytesAsInt(), logger) { @Override protected void failEngine(IOException cause) { assertFalse(failedEngine.get()); @@ -185,7 +185,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { Path tempDir = createTempDir(); Store store = newStore(tempDir, false); AtomicBoolean failedEngine = new AtomicBoolean(false); - RecoverySourceHandler handler = new RecoverySourceHandler(null, request, recoverySettings, null, logger) { + RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, recoverySettings.getChunkSize().bytesAsInt(), logger) { @Override protected void failEngine(IOException cause) { assertFalse(failedEngine.get()); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java index edb0f7b6a78..a4ffef7b9eb 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java @@ -41,7 +41,7 @@ public class RecoveryStatusTests extends ESSingleNodeTestCase { IndexShard indexShard = service.getShardOrNull(0); DiscoveryNode node = new DiscoveryNode("foo", new LocalTransportAddress("bar"), Version.CURRENT); - RecoveryStatus status = new RecoveryStatus(indexShard, node, new RecoveryTarget.RecoveryListener() { + RecoveryTarget status = new RecoveryTarget(indexShard, node, new RecoveryTargetService.RecoveryListener() { @Override public void onRecoveryDone(RecoveryState state) { } diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java similarity index 97% rename from core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java rename to core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java index f81d9792187..7a516d5e36c 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java +++ 
b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java @@ -52,7 +52,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class RecoveryStateTests extends ESTestCase { +public class RecoveryTargetTests extends ESTestCase { abstract class Streamer extends Thread { private T lastRead; final private AtomicBoolean shouldStop; @@ -329,8 +329,10 @@ public class RecoveryStateTests extends ESTestCase { assertThat((double) index.recoveredFilesPercent(), equalTo(100.0)); assertThat((double) index.recoveredBytesPercent(), equalTo(100.0)); } else { - assertThat((double) index.recoveredFilesPercent(), closeTo(100.0 * index.recoveredFileCount() / index.totalRecoverFiles(), 0.1)); - assertThat((double) index.recoveredBytesPercent(), closeTo(100.0 * index.recoveredBytes() / index.totalRecoverBytes(), 0.1)); + assertThat((double) index.recoveredFilesPercent(), + closeTo(100.0 * index.recoveredFileCount() / index.totalRecoverFiles(), 0.1)); + assertThat((double) index.recoveredBytesPercent(), + closeTo(100.0 * index.recoveredBytes() / index.totalRecoverBytes(), 0.1)); } } @@ -346,7 +348,8 @@ public class RecoveryStateTests extends ESTestCase { stages[i] = stages[j]; stages[j] = t; try { - RecoveryState state = new RecoveryState(new ShardId("bla", "_na_", 0), randomBoolean(), randomFrom(Type.values()), discoveryNode, discoveryNode); + RecoveryState state = new RecoveryState( + new ShardId("bla", "_na_", 0), randomBoolean(), randomFrom(Type.values()), discoveryNode, discoveryNode); for (Stage stage : stages) { state.setStage(stage); } @@ -360,7 +363,8 @@ public class RecoveryStateTests extends ESTestCase { i = randomIntBetween(1, stages.length - 1); ArrayList list = new ArrayList<>(Arrays.asList(Arrays.copyOfRange(stages, 0, i))); list.addAll(Arrays.asList(stages)); - RecoveryState state = new RecoveryState(new ShardId("bla", "_na_", 0), randomBoolean(), randomFrom(Type.values()), discoveryNode, discoveryNode); + RecoveryState state = new RecoveryState(new ShardId("bla", "_na_", 0), randomBoolean(), randomFrom(Type.values()), discoveryNode, + discoveryNode); for (Stage stage : list) { state.setStage(stage); } @@ -532,7 +536,7 @@ public class RecoveryStateTests extends ESTestCase { if (f.equals(anotherFile)) { assertEquals(f.hashCode(), anotherFile.hashCode()); } else if (f.hashCode() != anotherFile.hashCode()) { - assertFalse(f.equals(anotherFile)); + assertFalse(f.equals(anotherFile)); } } } diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java index 4cad0b2bf05..a47217e3048 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java @@ -30,8 +30,8 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveriesCollection; import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.indices.recovery.RecoveryStatus; import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -45,7 +45,7 @@ import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.lessThan; public class RecoveriesCollectionTests extends ESSingleNodeTestCase { - final static RecoveryTarget.RecoveryListener listener = new RecoveryTarget.RecoveryListener() { + final static RecoveryTargetService.RecoveryListener listener = new RecoveryTargetService.RecoveryListener() { @Override public void onRecoveryDone(RecoveryState state) { @@ -61,12 +61,12 @@ public class RecoveriesCollectionTests extends ESSingleNodeTestCase { createIndex(); final RecoveriesCollection collection = new RecoveriesCollection(logger, getInstanceFromNode(ThreadPool.class)); final long recoveryId = startRecovery(collection); - try (RecoveriesCollection.StatusRef status = collection.getStatus(recoveryId)) { + try (RecoveriesCollection.RecoveryRef status = collection.getRecovery(recoveryId)) { final long lastSeenTime = status.status().lastAccessTime(); assertBusy(new Runnable() { @Override public void run() { - try (RecoveriesCollection.StatusRef currentStatus = collection.getStatus(recoveryId)) { + try (RecoveriesCollection.RecoveryRef currentStatus = collection.getRecovery(recoveryId)) { assertThat("access time failed to update", lastSeenTime, lessThan(currentStatus.status().lastAccessTime())); } } @@ -81,7 +81,7 @@ public class RecoveriesCollectionTests extends ESSingleNodeTestCase { final RecoveriesCollection collection = new RecoveriesCollection(logger, getInstanceFromNode(ThreadPool.class)); final AtomicBoolean failed = new AtomicBoolean(); final CountDownLatch latch = new CountDownLatch(1); - final long recoveryId = startRecovery(collection, new RecoveryTarget.RecoveryListener() { + final long recoveryId = startRecovery(collection, new RecoveryTargetService.RecoveryListener() { @Override public void onRecoveryDone(RecoveryState state) { latch.countDown(); @@ -107,8 +107,8 @@ public class RecoveriesCollectionTests extends ESSingleNodeTestCase { final RecoveriesCollection collection = new RecoveriesCollection(logger, getInstanceFromNode(ThreadPool.class)); final long recoveryId = startRecovery(collection); final long recoveryId2 = startRecovery(collection); - try (RecoveriesCollection.StatusRef statusRef = collection.getStatus(recoveryId)) { - ShardId shardId = statusRef.status().shardId(); + try (RecoveriesCollection.RecoveryRef recoveryRef = collection.getRecovery(recoveryId)) { + ShardId shardId = recoveryRef.status().shardId(); assertTrue("failed to cancel recoveries", collection.cancelRecoveriesForShard(shardId, "test")); assertThat("all recoveries should be cancelled", collection.size(), equalTo(0)); } finally { @@ -124,19 +124,19 @@ public class RecoveriesCollectionTests extends ESSingleNodeTestCase { final long recoveryId2 = startRecovery(collection); final ArrayList toClose = new ArrayList<>(); try { - RecoveriesCollection.StatusRef statusRef = collection.getStatus(recoveryId); - toClose.add(statusRef); - ShardId shardId = statusRef.status().shardId(); + RecoveriesCollection.RecoveryRef recoveryRef = collection.getRecovery(recoveryId); + toClose.add(recoveryRef); + ShardId shardId = recoveryRef.status().shardId(); assertFalse("should not have cancelled recoveries", collection.cancelRecoveriesForShard(shardId, "test", status -> false)); - final Predicate shouldCancel = status -> status.recoveryId() == recoveryId; + final Predicate shouldCancel = status -> status.recoveryId() == recoveryId; assertTrue("failed to cancel recoveries", collection.cancelRecoveriesForShard(shardId, "test", shouldCancel)); assertThat("we should still have on recovery", collection.size(), 
equalTo(1)); - statusRef = collection.getStatus(recoveryId); - toClose.add(statusRef); - assertNull("recovery should have been deleted", statusRef); - statusRef = collection.getStatus(recoveryId2); - toClose.add(statusRef); - assertNotNull("recovery should NOT have been deleted", statusRef); + recoveryRef = collection.getRecovery(recoveryId); + toClose.add(recoveryRef); + assertNull("recovery should have been deleted", recoveryRef); + recoveryRef = collection.getRecovery(recoveryId2); + toClose.add(recoveryRef); + assertNotNull("recovery should NOT have been deleted", recoveryRef); } finally { // TODO: do we want a lucene IOUtils version of this? @@ -163,7 +163,7 @@ public class RecoveriesCollectionTests extends ESSingleNodeTestCase { return startRecovery(collection, listener, TimeValue.timeValueMinutes(60)); } - long startRecovery(RecoveriesCollection collection, RecoveryTarget.RecoveryListener listener, TimeValue timeValue) { + long startRecovery(RecoveriesCollection collection, RecoveryTargetService.RecoveryListener listener, TimeValue timeValue) { IndicesService indexServices = getInstanceFromNode(IndicesService.class); IndexShard indexShard = indexServices.indexServiceSafe("test").getShardOrNull(0); final DiscoveryNode sourceNode = new DiscoveryNode("id", DummyTransportAddress.INSTANCE, Version.CURRENT); diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index fac65cc8dca..1fd44959a59 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -44,7 +44,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; -import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -440,7 +440,7 @@ public class RelocationIT extends ESIntegTestCase { @Override public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { - if (action.equals(RecoveryTarget.Actions.FILE_CHUNK)) { + if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest chunkRequest = (RecoveryFileChunkRequest) request; if (chunkRequest.name().startsWith(IndexFileNames.SEGMENTS)) { // corrupting the segments_N files in order to make sure future recovery re-send files diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index bfaf961ee21..0b5c4a6bebf 100644 --- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -32,7 +32,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.recovery.IndexRecoveryIT; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.plugins.Plugin; import 
org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -121,7 +121,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { @Override public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { - if (action.equals(RecoveryTarget.Actions.FILE_CHUNK)) { + if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; logger.debug("file chunk [" + req.toString() + "] lastChunk: " + req.lastChunk()); if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) { From 15a9da4d8406ee9a3d97766eafd91bf721235439 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 11 Feb 2016 17:21:21 -0500 Subject: [PATCH 21/22] Remove accidental println --- build.gradle | 1 - 1 file changed, 1 deletion(-) diff --git a/build.gradle b/build.gradle index eb1ee4e4f8c..b419bf01e15 100644 --- a/build.gradle +++ b/build.gradle @@ -225,7 +225,6 @@ allprojects { apply plugin: 'eclipse' // Name all the non-root projects after their path so that paths get grouped together when imported into eclipse. if (path != ':') { - System.err.println(eclipse.project.name + ' ' + path) eclipse.project.name = path } From 691c5c49ed8086fad0389fbf64077442c6d5ef86 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 12 Feb 2016 09:25:24 +0100 Subject: [PATCH 22/22] Fix registerSettingsFilterIfMissing and add unittest for settings filter registration --- .../common/settings/SettingsModule.java | 2 +- .../common/settings/SettingsModuleTests.java | 22 +++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 84eba6ba7b4..b06f53459c8 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -102,7 +102,7 @@ public class SettingsModule extends AbstractModule { } public void registerSettingsFilterIfMissing(String filter) { - if (settingsFilterPattern.contains(filter)) { + if (settingsFilterPattern.contains(filter) == false) { registerSettingsFilter(filter); } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java index 67ecb78a7cf..4f790c2d3a9 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java @@ -127,4 +127,26 @@ public class SettingsModuleTests extends ModuleTestCase { } } + + public void testRegisterSettingsFilter() { + Settings settings = Settings.builder().put("foo.bar", "false").put("bar.foo", false).put("bar.baz", false).build(); + SettingsModule module = new SettingsModule(settings); + module.registerSetting(Setting.boolSetting("foo.bar", true, false, Setting.Scope.CLUSTER)); + module.registerSetting(Setting.boolSetting("bar.foo", true, false, Setting.Scope.CLUSTER)); + module.registerSetting(Setting.boolSetting("bar.baz", true, false, Setting.Scope.CLUSTER)); + + module.registerSettingsFilter("foo.*"); + module.registerSettingsFilterIfMissing("bar.foo"); + try { + module.registerSettingsFilter("bar.foo"); + fail(); + } catch 
(IllegalArgumentException ex) { + assertEquals("filter [bar.foo] has already been registered", ex.getMessage()); + } + assertInstanceBinding(module, Settings.class, (s) -> s == settings); + assertInstanceBinding(module, SettingsFilter.class, (s) -> s.filter(settings).getAsMap().size() == 1); + assertInstanceBinding(module, SettingsFilter.class, (s) -> s.filter(settings).getAsMap().containsKey("bar.baz")); + assertInstanceBinding(module, SettingsFilter.class, (s) -> s.filter(settings).getAsMap().get("bar.baz").equals("false")); + + } }
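
Note on the last patch: it inverts the guard in registerSettingsFilterIfMissing so a filter pattern is added only when it is not already registered, while a direct duplicate call to registerSettingsFilter still fails, as the new SettingsModuleTests assertion shows. The following is a minimal standalone sketch of that contract; the FilterRegistry class is hypothetical and only mirrors the behaviour under test, it is not the actual SettingsModule API.

import java.util.HashSet;
import java.util.Set;

// Hypothetical stand-in that mirrors the register / register-if-missing contract
// exercised by SettingsModuleTests above; not the real SettingsModule class.
class FilterRegistry {
    private final Set<String> filterPatterns = new HashSet<>();

    void registerFilter(String pattern) {
        // a duplicate registration is a hard error, as asserted in the test
        if (filterPatterns.contains(pattern)) {
            throw new IllegalArgumentException("filter [" + pattern + "] has already been registered");
        }
        filterPatterns.add(pattern);
    }

    void registerFilterIfMissing(String pattern) {
        // the fix: only register when the pattern is NOT already present
        if (filterPatterns.contains(pattern) == false) {
            registerFilter(pattern);
        }
    }

    public static void main(String[] args) {
        FilterRegistry registry = new FilterRegistry();
        registry.registerFilter("foo.*");
        registry.registerFilterIfMissing("bar.foo"); // first registration, added
        registry.registerFilterIfMissing("bar.foo"); // already present, silently skipped
        try {
            registry.registerFilter("bar.foo");      // duplicate direct registration, throws
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}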