diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a0655b4b849..6bbb655a18b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -111,19 +111,18 @@ then `File->New Project From Existing Sources`. Point to the root of
the source directory, select
`Import project from external model->Gradle`, enable
`Use auto-import`. In order to run tests directly from
-IDEA 2017.2 and above it is required to disable IDEA run launcher to avoid
-finding yourself in "jar hell", which can be achieved by adding the
+IDEA 2017.2 and above, you must disable the IDEA run launcher to prevent
+`idea_rt.jar` from causing "jar hell". This can be achieved by adding the
`-Didea.no.launcher=true` [JVM
-option](https://intellij-support.jetbrains.com/hc/en-us/articles/206544869-Configuring-JVM-options-and-platform-properties)
-or by adding `idea.no.launcher=true` to the
+option](https://intellij-support.jetbrains.com/hc/en-us/articles/206544869-Configuring-JVM-options-and-platform-properties).
+Alternatively, `idea.no.launcher=true` can be set in the
[`idea.properties`](https://www.jetbrains.com/help/idea/file-idea-properties.html)
-file which can be accessed under Help > Edit Custom Properties within IDEA. You
-may also need to [remove `ant-javafx.jar` from your
+file, which can be accessed under Help > Edit Custom Properties (this will require a
+restart of IDEA). For IDEA 2017.3 and above, in addition to the JVM option, you will need to go to
+`Run->Edit Configurations...` and change the value for the `Shorten command line` setting from
+`user-local default: none` to `classpath file`. You may also need to [remove `ant-javafx.jar` from your
classpath](https://github.com/elastic/elasticsearch/issues/14348) if that is
-reported as a source of jar hell. Additionally, in order to run tests directly
-from IDEA 2017.3 and above, go to `Run->Edit Configurations...` and change the
-value for the `Shorten command line` setting from `user-local default: none` to
-`classpath file`.
+reported as a source of jar hell.
The Elasticsearch codebase makes heavy use of Java `assert`s and the
test runner requires that assertions be enabled within the JVM. This
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
index 2cc1d4849d5..c8767318399 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
@@ -21,6 +21,8 @@ package org.elasticsearch.client;
import org.apache.http.Header;
import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
@@ -29,13 +31,13 @@ import java.util.Collections;
/**
* A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Indices API.
- *
+ *
* See Indices API on elastic.co
*/
public final class IndicesClient {
private final RestHighLevelClient restHighLevelClient;
- public IndicesClient(RestHighLevelClient restHighLevelClient) {
+ IndicesClient(RestHighLevelClient restHighLevelClient) {
this.restHighLevelClient = restHighLevelClient;
}
@@ -56,8 +58,32 @@ public final class IndicesClient {
* See
* Delete Index API on elastic.co
*/
- public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener, Header... headers) {
+ public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener,
+ Header... headers) {
restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
listener, Collections.emptySet(), headers);
}
+
+ /**
+ * Creates an index using the Create Index API
+ *
+ * See
+ * Create Index API on elastic.co
+ */
+ public CreateIndexResponse createIndex(CreateIndexRequest createIndexRequest, Header... headers) throws IOException {
+ return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
+ Collections.emptySet(), headers);
+ }
+
+ /**
+ * Asynchronously creates an index using the Create Index API
+ *
+ * See
+ * Create Index API on elastic.co
+ */
+ public void createIndexAsync(CreateIndexRequest createIndexRequest, ActionListener<CreateIndexResponse> listener,
+ Header... headers) {
+ restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
+ listener, Collections.emptySet(), headers);
+ }
}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
index e2a6dcac20b..a3544ddb89b 100755
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
@@ -29,12 +29,14 @@ import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
+import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.support.ActiveShardCount;
@@ -49,6 +51,7 @@ import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -135,6 +138,19 @@ public final class Request {
return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
}
+ static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException {
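+ // the create index endpoint is just the index name, i.e. PUT /<index> (see the endpoint assertion in RequestTests)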
+ String endpoint = endpoint(createIndexRequest.indices(), Strings.EMPTY_ARRAY, "");
+
+ Params parameters = Params.builder();
+ parameters.withTimeout(createIndexRequest.timeout());
+ parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout());
+ parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards());
+ parameters.withUpdateAllTypes(createIndexRequest.updateAllTypes());
+
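+ // settings, mappings and aliases are serialized from the request itself into the JSON body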
+ HttpEntity entity = createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE);
+ return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
+ }
+
static Request info() {
return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
}
@@ -381,6 +397,18 @@ public final class Request {
return new Request("DELETE", "/_search/scroll", Collections.emptyMap(), entity);
}
+ static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException {
+ Params params = Params.builder();
+ params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true");
+ if (multiSearchRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) {
+ params.putParam("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests()));
+ }
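+ // msearch bodies use the multi-line format: one metadata line plus one body line per search request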
+ XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent();
+ byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent);
+ HttpEntity entity = new ByteArrayEntity(source, createContentType(xContent.type()));
+ return new Request("GET", "/_msearch", params.getParams(), entity);
+ }
+
private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));
@@ -520,6 +548,13 @@ public final class Request {
return putParam("timeout", timeout);
}
+ Params withUpdateAllTypes(boolean updateAllTypes) {
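+ // update_all_types defaults to false, so the parameter is only sent when explicitly enabled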
+ if (updateAllTypes) {
+ return putParam("update_all_types", Boolean.TRUE.toString());
+ }
+ return this;
+ }
+
Params withVersion(long version) {
if (version != Versions.MATCH_ANY) {
return putParam("version", Long.toString(version));
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
index 2ebaf2cf342..29ab7f90ff5 100755
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
@@ -38,6 +38,8 @@ import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.search.MultiSearchRequest;
+import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
@@ -377,6 +379,28 @@ public class RestHighLevelClient implements Closeable {
performRequestAsyncAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, listener, emptySet(), headers);
}
+ /**
+ * Executes a multi search using the msearch API
+ *
+ * See Multi search API on
+ * elastic.co
+ */
+ public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, Header... headers) throws IOException {
+ return performRequestAndParseEntity(multiSearchRequest, Request::multiSearch, MultiSearchResponse::fromXContext,
+ emptySet(), headers);
+ }
+
+ /**
+ * Asynchronously executes a multi search using the msearch API
+ *
+ * See Multi search API on
+ * elastic.co
+ */
+ public final void multiSearchAsync(MultiSearchRequest searchRequest, ActionListener<MultiSearchResponse> listener, Header... headers) {
+ performRequestAsyncAndParseEntity(searchRequest, Request::multiSearch, MultiSearchResponse::fromXContext, listener,
+ emptySet(), headers);
+ }
+
/**
* Executes a search using the Search Scroll API
*
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
index 4045e565288..0d6430b5912 100755
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
@@ -20,14 +20,88 @@
package org.elasticsearch.client;
import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
public class IndicesClientIT extends ESRestHighLevelClientTestCase {
+ @SuppressWarnings("unchecked")
+ public void testCreateIndex() throws IOException {
+ {
+ // Create index
+ String indexName = "plain_index";
+ assertFalse(indexExists(indexName));
+
+ CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
+
+ CreateIndexResponse createIndexResponse =
+ execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync);
+ assertTrue(createIndexResponse.isAcknowledged());
+
+ assertTrue(indexExists(indexName));
+ }
+ {
+ // Create index with mappings, aliases and settings
+ String indexName = "rich_index";
+ assertFalse(indexExists(indexName));
+
+ CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
+
+ Alias alias = new Alias("alias_name");
+ alias.filter("{\"term\":{\"year\":2016}}");
+ alias.routing("1");
+ createIndexRequest.alias(alias);
+
+ Settings.Builder settings = Settings.builder();
+ settings.put(SETTING_NUMBER_OF_REPLICAS, 2);
+ createIndexRequest.settings(settings);
+
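+ // a minimal mapping: a single text field under the "type_name" type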
+ XContentBuilder mappingBuilder = JsonXContent.contentBuilder();
+ mappingBuilder.startObject().startObject("properties").startObject("field");
+ mappingBuilder.field("type", "text");
+ mappingBuilder.endObject().endObject().endObject();
+ createIndexRequest.mapping("type_name", mappingBuilder);
+
+ CreateIndexResponse createIndexResponse =
+ execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync);
+ assertTrue(createIndexResponse.isAcknowledged());
+
+ Map<String, Object> indexMetaData = getIndexMetadata(indexName);
+
+ Map<String, Object> settingsData = (Map<String, Object>) indexMetaData.get("settings");
+ Map<String, Object> indexSettings = (Map<String, Object>) settingsData.get("index");
+ assertEquals("2", indexSettings.get("number_of_replicas"));
+
+ Map<String, Object> aliasesData = (Map<String, Object>) indexMetaData.get("aliases");
+ Map<String, Object> aliasData = (Map<String, Object>) aliasesData.get("alias_name");
+ assertEquals("1", aliasData.get("index_routing"));
+ Map<String, Object> filter = (Map<String, Object>) aliasData.get("filter");
+ Map<String, Object> term = (Map<String, Object>) filter.get("term");
+ assertEquals(2016, term.get("year"));
+
+ Map<String, Object> mappingsData = (Map<String, Object>) indexMetaData.get("mappings");
+ Map<String, Object> typeData = (Map<String, Object>) mappingsData.get("type_name");
+ Map<String, Object> properties = (Map<String, Object>) typeData.get("properties");
+ Map<String, Object> field = (Map<String, Object>) properties.get("field");
+
+ assertEquals("text", field.get("type"));
+ }
+ }
+
public void testDeleteIndex() throws IOException {
{
// Delete index if exists
@@ -65,4 +139,18 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
return response.getStatusLine().getStatusCode() == 200;
}
+
+ @SuppressWarnings("unchecked")
+ private Map<String, Object> getIndexMetadata(String index) throws IOException {
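+ // GET /<index> returns the settings, mappings and aliases of the index, keyed by its name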
+ Response response = client().performRequest("GET", index);
+
+ XContentType entityContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue());
+ Map<String, Object> responseEntity = XContentHelper.convertToMap(entityContentType.xContent(), response.getEntity().getContent(),
+ false);
+
+ Map<String, Object> indexMetaData = (Map<String, Object>) responseEntity.get(index);
+ assertNotNull(indexMetaData);
+
+ return indexMetaData;
+ }
}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java
index 3be250d513d..182de30fd15 100755
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java
@@ -25,6 +25,7 @@ import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
@@ -32,9 +33,11 @@ import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
+import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
@@ -42,6 +45,7 @@ import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@@ -56,6 +60,7 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.rest.action.search.RestSearchAction;
+import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.builder.SearchSourceBuilder;
@@ -72,16 +77,21 @@ import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.lang.reflect.Modifier;
+import java.util.ArrayList;
import java.util.HashMap;
+import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.StringJoiner;
+import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import static java.util.Collections.singletonMap;
+import static org.elasticsearch.client.Request.REQUEST_BODY_CONTENT_TYPE;
import static org.elasticsearch.client.Request.enforceSameContentType;
+import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
public class RequestTests extends ESTestCase {
@@ -245,6 +255,34 @@ public class RequestTests extends ESTestCase {
assertEquals(method, request.getMethod());
}
+ public void testCreateIndex() throws IOException {
+ CreateIndexRequest createIndexRequest = new CreateIndexRequest();
+
+ String indexName = "index-" + randomAlphaOfLengthBetween(2, 5);
+
+ createIndexRequest.index(indexName);
+
+ Map<String, String> expectedParams = new HashMap<>();
+
+ setRandomTimeout(createIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+ setRandomMasterTimeout(createIndexRequest, expectedParams);
+ setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams);
+
+ if (randomBoolean()) {
+ boolean updateAllTypes = randomBoolean();
+ createIndexRequest.updateAllTypes(updateAllTypes);
+ if (updateAllTypes) {
+ expectedParams.put("update_all_types", Boolean.TRUE.toString());
+ }
+ }
+
+ Request request = Request.createIndex(createIndexRequest);
+ assertEquals("/" + indexName, request.getEndpoint());
+ assertEquals(expectedParams, request.getParameters());
+ assertEquals("PUT", request.getMethod());
+ assertToXContentBody(createIndexRequest, request.getEntity());
+ }
+
public void testDeleteIndex() throws IOException {
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest();
@@ -399,11 +437,7 @@ public class RequestTests extends ESTestCase {
expectedParams.put("refresh", refreshPolicy.getValue());
}
}
- if (randomBoolean()) {
- int waitForActiveShards = randomIntBetween(0, 10);
- updateRequest.waitForActiveShards(waitForActiveShards);
- expectedParams.put("wait_for_active_shards", String.valueOf(waitForActiveShards));
- }
+ setRandomWaitForActiveShards(updateRequest::waitForActiveShards, expectedParams);
if (randomBoolean()) {
long version = randomLong();
updateRequest.version(version);
@@ -771,6 +805,55 @@ public class RequestTests extends ESTestCase {
}
}
+ public void testMultiSearch() throws IOException {
+ int numberOfSearchRequests = randomIntBetween(0, 32);
+ MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+ for (int i = 0; i < numberOfSearchRequests; i++) {
+ SearchRequest searchRequest = randomSearchRequest(() -> {
+ // No need to return a very complex SearchSourceBuilder here, that is tested elsewhere
+ SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
+ searchSourceBuilder.from(randomInt(10));
+ searchSourceBuilder.size(randomIntBetween(20, 100));
+ return searchSourceBuilder;
+ });
+ // scroll is not supported in the current msearch api, so unset it:
+ searchRequest.scroll((Scroll) null);
+ // only expand_wildcards, ignore_unavailable and allow_no_indices can be specified from msearch api, so unset other options:
+ IndicesOptions randomlyGenerated = searchRequest.indicesOptions();
+ IndicesOptions msearchDefault = new MultiSearchRequest().indicesOptions();
+ searchRequest.indicesOptions(IndicesOptions.fromOptions(
+ randomlyGenerated.ignoreUnavailable(), randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(),
+ randomlyGenerated.expandWildcardsClosed(), msearchDefault.allowAliasesToMultipleIndices(),
+ msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases()
+ ));
+ multiSearchRequest.add(searchRequest);
+ }
+
+ Map<String, String> expectedParams = new HashMap<>();
+ expectedParams.put(RestSearchAction.TYPED_KEYS_PARAM, "true");
+ if (randomBoolean()) {
+ multiSearchRequest.maxConcurrentSearchRequests(randomIntBetween(1, 8));
+ expectedParams.put("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests()));
+ }
+
+ Request request = Request.multiSearch(multiSearchRequest);
+ assertEquals("/_msearch", request.getEndpoint());
+ assertEquals(expectedParams, request.getParameters());
+
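+ // round-trip the serialized body; a source that parses to the default SearchSourceBuilder stands for "no source"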
+ List<SearchRequest> requests = new ArrayList<>();
+ CheckedBiConsumer<SearchRequest, XContentParser, IOException> consumer = (searchRequest, p) -> {
+ SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(p);
+ if (searchSourceBuilder.equals(new SearchSourceBuilder()) == false) {
+ searchRequest.source(searchSourceBuilder);
+ }
+ requests.add(searchRequest);
+ };
+ MultiSearchRequest.readMultiLineFormat(new BytesArray(EntityUtils.toByteArray(request.getEntity())),
+ REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null,
+ null, xContentRegistry(), true);
+ assertEquals(requests, multiSearchRequest.requests());
+ }
+
public void testSearchScroll() throws IOException {
SearchScrollRequest searchScrollRequest = new SearchScrollRequest();
searchScrollRequest.scrollId(randomAlphaOfLengthBetween(5, 10));
@@ -782,7 +865,7 @@ public class RequestTests extends ESTestCase {
assertEquals("/_search/scroll", request.getEndpoint());
assertEquals(0, request.getParameters().size());
assertToXContentBody(searchScrollRequest, request.getEntity());
- assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
+ assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
}
public void testClearScroll() throws IOException {
@@ -796,11 +879,11 @@ public class RequestTests extends ESTestCase {
assertEquals("/_search/scroll", request.getEndpoint());
assertEquals(0, request.getParameters().size());
assertToXContentBody(clearScrollRequest, request.getEntity());
- assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
+ assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
}
private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException {
- BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, Request.REQUEST_BODY_CONTENT_TYPE, false);
+ BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false);
assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue());
assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity)));
}
@@ -959,6 +1042,14 @@ public class RequestTests extends ESTestCase {
}
}
+ private static void setRandomWaitForActiveShards(Consumer<Integer> setter, Map<String, String> expectedParams) {
+ if (randomBoolean()) {
+ int waitForActiveShards = randomIntBetween(0, 10);
+ setter.accept(waitForActiveShards);
+ expectedParams.put("wait_for_active_shards", String.valueOf(waitForActiveShards));
+ }
+ }
+
private static void setRandomRefreshPolicy(ReplicatedWriteRequest<?> request, Map<String, String> expectedParams) {
if (randomBoolean()) {
WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java
index 289ebf372d8..3e72c7c64b6 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java
@@ -23,20 +23,30 @@ import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.nio.entity.NStringEntity;
+import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.search.MultiSearchRequest;
+import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.NestedQueryBuilder;
+import org.elasticsearch.index.query.ScriptQueryBuilder;
+import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.join.aggregations.Children;
import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder;
import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.range.Range;
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
@@ -45,10 +55,12 @@ import org.elasticsearch.search.aggregations.matrix.stats.MatrixStats;
import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
+import org.hamcrest.Matchers;
import org.junit.Before;
import java.io.IOException;
@@ -64,6 +76,7 @@ import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.nullValue;
public class SearchIT extends ESRestHighLevelClientTestCase {
@@ -80,10 +93,24 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
StringEntity doc5 = new StringEntity("{\"type\":\"type2\", \"num\":100, \"num2\":10}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/index/type/5", Collections.emptyMap(), doc5);
client().performRequest("POST", "/index/_refresh");
+
+ StringEntity doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
+ client().performRequest("PUT", "/index1/doc/1", Collections.emptyMap(), doc);
+ doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
+ client().performRequest("PUT", "/index1/doc/2", Collections.emptyMap(), doc);
+ doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
+ client().performRequest("PUT", "/index2/doc/3", Collections.emptyMap(), doc);
+ doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
+ client().performRequest("PUT", "/index2/doc/4", Collections.emptyMap(), doc);
+ doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
+ client().performRequest("PUT", "/index3/doc/5", Collections.emptyMap(), doc);
+ doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
+ client().performRequest("PUT", "/index3/doc/6", Collections.emptyMap(), doc);
+ client().performRequest("POST", "/index1,index2,index3/_refresh");
}
public void testSearchNoQuery() throws IOException {
- SearchRequest searchRequest = new SearchRequest();
+ SearchRequest searchRequest = new SearchRequest("index");
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
assertSearchHeader(searchResponse);
assertNull(searchResponse.getAggregations());
@@ -106,7 +133,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
}
public void testSearchMatchQuery() throws IOException {
- SearchRequest searchRequest = new SearchRequest();
+ SearchRequest searchRequest = new SearchRequest("index");
searchRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10)));
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
assertSearchHeader(searchResponse);
@@ -164,7 +191,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertEquals(RestStatus.BAD_REQUEST, exception.status());
}
- SearchRequest searchRequest = new SearchRequest();
+ SearchRequest searchRequest = new SearchRequest("index");
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.aggregation(new RangeAggregationBuilder("agg1").field("num")
.addRange("first", 0, 30).addRange("second", 31, 200));
@@ -193,7 +220,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
}
public void testSearchWithTermsAndRangeAgg() throws IOException {
- SearchRequest searchRequest = new SearchRequest();
+ SearchRequest searchRequest = new SearchRequest("index");
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
TermsAggregationBuilder agg = new TermsAggregationBuilder("agg1", ValueType.STRING).field("type.keyword");
agg.subAggregation(new RangeAggregationBuilder("subagg").field("num")
@@ -247,7 +274,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
}
public void testSearchWithMatrixStats() throws IOException {
- SearchRequest searchRequest = new SearchRequest();
+ SearchRequest searchRequest = new SearchRequest("index");
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.aggregation(new MatrixStatsAggregationBuilder("agg1").fields(Arrays.asList("num", "num2")));
searchSourceBuilder.size(0);
@@ -374,7 +401,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
}
public void testSearchWithSuggest() throws IOException {
- SearchRequest searchRequest = new SearchRequest();
+ SearchRequest searchRequest = new SearchRequest("index");
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion("sugg1", new PhraseSuggestionBuilder("type"))
.setGlobalText("type"));
@@ -464,6 +491,185 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
}
}
+ public void testMultiSearch() throws Exception {
+ MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+ SearchRequest searchRequest1 = new SearchRequest("index1");
+ searchRequest1.source().sort("_id", SortOrder.ASC);
+ multiSearchRequest.add(searchRequest1);
+ SearchRequest searchRequest2 = new SearchRequest("index2");
+ searchRequest2.source().sort("_id", SortOrder.ASC);
+ multiSearchRequest.add(searchRequest2);
+ SearchRequest searchRequest3 = new SearchRequest("index3");
+ searchRequest3.source().sort("_id", SortOrder.ASC);
+ multiSearchRequest.add(searchRequest3);
+
+ MultiSearchResponse multiSearchResponse =
+ execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
+ assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
+ assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
+
+ assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
+ assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("1"));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(1).getId(), Matchers.equalTo("2"));
+
+ assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[1].getResponse());
+ assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+ assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("3"));
+ assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(1).getId(), Matchers.equalTo("4"));
+
+ assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[2].getResponse());
+ assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+ assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("5"));
+ assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(1).getId(), Matchers.equalTo("6"));
+ }
+
+ public void testMultiSearch_withAgg() throws Exception {
+ MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+ SearchRequest searchRequest1 = new SearchRequest("index1");
+ searchRequest1.source().size(0).aggregation(new TermsAggregationBuilder("name", ValueType.STRING).field("field.keyword")
+ .order(BucketOrder.key(true)));
+ multiSearchRequest.add(searchRequest1);
+ SearchRequest searchRequest2 = new SearchRequest("index2");
+ searchRequest2.source().size(0).aggregation(new TermsAggregationBuilder("name", ValueType.STRING).field("field.keyword")
+ .order(BucketOrder.key(true)));
+ multiSearchRequest.add(searchRequest2);
+ SearchRequest searchRequest3 = new SearchRequest("index3");
+ searchRequest3.source().size(0).aggregation(new TermsAggregationBuilder("name", ValueType.STRING).field("field.keyword")
+ .order(BucketOrder.key(true)));
+ multiSearchRequest.add(searchRequest3);
+
+ MultiSearchResponse multiSearchResponse =
+ execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
+ assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
+ assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
+
+ assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
+ assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getHits().length, Matchers.equalTo(0));
+ Terms terms = multiSearchResponse.getResponses()[0].getResponse().getAggregations().get("name");
+ assertThat(terms.getBuckets().size(), Matchers.equalTo(2));
+ assertThat(terms.getBuckets().get(0).getKeyAsString(), Matchers.equalTo("value1"));
+ assertThat(terms.getBuckets().get(1).getKeyAsString(), Matchers.equalTo("value2"));
+
+ assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[1].getResponse());
+ assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+ assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getHits().length, Matchers.equalTo(0));
+ terms = multiSearchResponse.getResponses()[1].getResponse().getAggregations().get("name");
+ assertThat(terms.getBuckets().size(), Matchers.equalTo(2));
+ assertThat(terms.getBuckets().get(0).getKeyAsString(), Matchers.equalTo("value1"));
+ assertThat(terms.getBuckets().get(1).getKeyAsString(), Matchers.equalTo("value2"));
+
+ assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[2].getResponse());
+ assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
+ assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getHits().length, Matchers.equalTo(0));
+ terms = multiSearchResponse.getResponses()[2].getResponse().getAggregations().get("name");
+ assertThat(terms.getBuckets().size(), Matchers.equalTo(2));
+ assertThat(terms.getBuckets().get(0).getKeyAsString(), Matchers.equalTo("value1"));
+ assertThat(terms.getBuckets().get(1).getKeyAsString(), Matchers.equalTo("value2"));
+ }
+
+ public void testMultiSearch_withQuery() throws Exception {
+ MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+ SearchRequest searchRequest1 = new SearchRequest("index1");
+ searchRequest1.source().query(new TermsQueryBuilder("field", "value2"));
+ multiSearchRequest.add(searchRequest1);
+ SearchRequest searchRequest2 = new SearchRequest("index2");
+ searchRequest2.source().query(new TermsQueryBuilder("field", "value2"));
+ multiSearchRequest.add(searchRequest2);
+ SearchRequest searchRequest3 = new SearchRequest("index3");
+ searchRequest3.source().query(new TermsQueryBuilder("field", "value2"));
+ multiSearchRequest.add(searchRequest3);
+
+ MultiSearchResponse multiSearchResponse =
+ execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
+ assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
+ assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
+
+ assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
+ assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("2"));
+
+ assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[1].getResponse());
+ assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+ assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("4"));
+
+ assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[2].getResponse());
+ assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+ assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("6"));
+
+ searchRequest1.source().highlighter(new HighlightBuilder().field("field"));
+ searchRequest2.source().highlighter(new HighlightBuilder().field("field"));
+ searchRequest3.source().highlighter(new HighlightBuilder().field("field"));
+ multiSearchResponse = execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
+ assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
+ assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
+
+ assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
+ assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(0).getHighlightFields()
+ .get("field").fragments()[0].string(), Matchers.equalTo("value2"));
+
+ assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[1].getResponse());
+ assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+ assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("4"));
+ assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getHighlightFields()
+ .get("field").fragments()[0].string(), Matchers.equalTo("value2"));
+
+ assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
+ assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
+ SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[2].getResponse());
+ assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
+ assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("6"));
+ assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getHighlightFields()
+ .get("field").fragments()[0].string(), Matchers.equalTo("value2"));
+ }
+
+ public void testMultiSearch_failure() throws Exception {
+ MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+ SearchRequest searchRequest1 = new SearchRequest("index1");
+ searchRequest1.source().query(new ScriptQueryBuilder(new Script(ScriptType.INLINE, "invalid", "code", Collections.emptyMap())));
+ multiSearchRequest.add(searchRequest1);
+ SearchRequest searchRequest2 = new SearchRequest("index2");
+ searchRequest2.source().query(new ScriptQueryBuilder(new Script(ScriptType.INLINE, "invalid", "code", Collections.emptyMap())));
+ multiSearchRequest.add(searchRequest2);
+
+ MultiSearchResponse multiSearchResponse =
+ execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
+ assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
+ assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(2));
+
+ assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(true));
+ assertThat(multiSearchResponse.getResponses()[0].getFailure().getMessage(), containsString("search_phase_execution_exception"));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue());
+
+ assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(true));
+ assertThat(multiSearchResponse.getResponses()[1].getFailure().getMessage(), containsString("search_phase_execution_exception"));
+ assertThat(multiSearchResponse.getResponses()[1].getResponse(), nullValue());
+ }
+
private static void assertSearchHeader(SearchResponse searchResponse) {
assertThat(searchResponse.getTook().nanos(), greaterThanOrEqualTo(0L));
assertEquals(0, searchResponse.getFailedShards());
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java
index e866fb92aae..372cc17d137 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java
@@ -21,13 +21,18 @@ package org.elasticsearch.client.documentation;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
-import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
@@ -52,8 +57,8 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
RestHighLevelClient client = highLevelClient();
{
- Response createIndexResponse = client().performRequest("PUT", "/posts");
- assertEquals(200, createIndexResponse.getStatusLine().getStatusCode());
+ CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("posts"));
+ assertTrue(createIndexResponse.isAcknowledged());
}
{
@@ -61,14 +66,26 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
DeleteIndexRequest request = new DeleteIndexRequest("posts"); // <1>
// end::delete-index-request
+ // tag::delete-index-request-timeout
+ request.timeout(TimeValue.timeValueMinutes(2)); // <1>
+ request.timeout("2m"); // <2>
+ // end::delete-index-request-timeout
+ // tag::delete-index-request-masterTimeout
+ request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
+ request.masterNodeTimeout("1m"); // <2>
+ // end::delete-index-request-masterTimeout
+ // tag::delete-index-request-indicesOptions
+ request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
+ // end::delete-index-request-indicesOptions
+
// tag::delete-index-execute
DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request);
// end::delete-index-execute
- assertTrue(deleteIndexResponse.isAcknowledged());
// tag::delete-index-response
boolean acknowledged = deleteIndexResponse.isAcknowledged(); // <1>
// end::delete-index-response
+ assertTrue(acknowledged);
// tag::delete-index-execute-async
client.indices().deleteIndexAsync(request, new ActionListener<DeleteIndexResponse>() {
@@ -85,26 +102,11 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
// end::delete-index-execute-async
}
- {
- DeleteIndexRequest request = new DeleteIndexRequest("posts");
- // tag::delete-index-request-timeout
- request.timeout(TimeValue.timeValueMinutes(2)); // <1>
- request.timeout("2m"); // <2>
- // end::delete-index-request-timeout
- // tag::delete-index-request-masterTimeout
- request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
- request.timeout("1m"); // <2>
- // end::delete-index-request-masterTimeout
- // tag::delete-index-request-indicesOptions
- request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
- // end::delete-index-request-indicesOptions
- }
-
{
// tag::delete-index-notfound
try {
DeleteIndexRequest request = new DeleteIndexRequest("does_not_exist");
- DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request);
+ client.indices().deleteIndex(request);
} catch (ElasticsearchException exception) {
if (exception.status() == RestStatus.NOT_FOUND) {
// <1>
@@ -113,4 +115,79 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
// end::delete-index-notfound
}
}
+
+ public void testCreateIndex() throws IOException {
+ RestHighLevelClient client = highLevelClient();
+
+ {
+ // tag::create-index-request
+ CreateIndexRequest request = new CreateIndexRequest("twitter"); // <1>
+ // end::create-index-request
+
+ // tag::create-index-request-settings
+ request.settings(Settings.builder() // <1>
+ .put("index.number_of_shards", 3)
+ .put("index.number_of_replicas", 2)
+ );
+ // end::create-index-request-settings
+
+ // tag::create-index-request-mappings
+ request.mapping("tweet", // <1>
+ " {\n" +
+ " \"tweet\": {\n" +
+ " \"properties\": {\n" +
+ " \"message\": {\n" +
+ " \"type\": \"text\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }", // <2>
+ XContentType.JSON);
+ // end::create-index-request-mappings
+
+ // tag::create-index-request-aliases
+ request.alias(
+ new Alias("twitter_alias") // <1>
+ );
+ // end::create-index-request-aliases
+
+ // tag::create-index-request-timeout
+ request.timeout(TimeValue.timeValueMinutes(2)); // <1>
+ request.timeout("2m"); // <2>
+ // end::create-index-request-timeout
+ // tag::create-index-request-masterTimeout
+ request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
+ request.masterNodeTimeout("1m"); // <2>
+ // end::create-index-request-masterTimeout
+ // tag::create-index-request-waitForActiveShards
+ request.waitForActiveShards(2); // <1>
+ request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2>
+ // end::create-index-request-waitForActiveShards
+
+ // tag::create-index-execute
+ CreateIndexResponse createIndexResponse = client.indices().createIndex(request);
+ // end::create-index-execute
+
+ // tag::create-index-response
+ boolean acknowledged = createIndexResponse.isAcknowledged(); // <1>
+ boolean shardsAcked = createIndexResponse.isShardsAcked(); // <2>
+ // end::create-index-response
+ assertTrue(acknowledged);
+ assertTrue(shardsAcked);
+
+ // tag::create-index-execute-async
+ client.indices().createIndexAsync(request, new ActionListener<CreateIndexResponse>() {
+ @Override
+ public void onResponse(CreateIndexResponse createIndexResponse) {
+ // <1>
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ // <2>
+ }
+ });
+ // end::create-index-execute-async
+ }
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
index f4e807b9ffc..9e4e7b909f7 100644
--- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.transport.TcpTransport;
import java.io.IOException;
@@ -986,7 +987,10 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class,
org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2),
UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException.class,
- org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0);
+ org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0),
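+ // each entry pins a unique numeric wire id (149 here) and the first version that can read the exception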
+ TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class,
+ MultiBucketConsumerService.TooManyBucketsException::new, 149,
+ Version.V_7_0_0_alpha1);
final Class<? extends ElasticsearchException> exceptionClass;
final CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException> constructor;
diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index 372d88c75cd..e234e8828bc 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
public class Version implements Comparable<Version> {
public static final int V_6_0_1_ID = 6000199;
public static final Version V_6_0_1 =
new Version(V_6_0_1_ID, org.apache.lucene.util.Version.LUCENE_7_0_1);
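+ // ids follow the scheme of the surrounding constants (major/minor/revision plus a trailing 99): 6.0.2 -> 6000299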
+ public static final int V_6_0_2_ID = 6000299;
+ public static final Version V_6_0_2 =
+ new Version(V_6_0_2_ID, org.apache.lucene.util.Version.LUCENE_7_0_1);
public static final int V_6_1_0_ID = 6010099;
public static final Version V_6_1_0 = new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
public static final int V_6_2_0_ID = 6020099;
- public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
+ public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_0);
public static final int V_7_0_0_alpha1_ID = 7000001;
public static final Version V_7_0_0_alpha1 =
new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_0);
@@ -157,6 +160,8 @@ public class Version implements Comparable {
return V_6_1_0;
case V_6_2_0_ID:
return V_6_2_0;
+ case V_6_0_2_ID:
+ return V_6_0_2;
case V_6_0_1_ID:
return V_6_0_1;
case V_6_0_0_ID:
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java
index a9e4c777784..dc088f815b1 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java
@@ -21,10 +21,13 @@ package org.elasticsearch.action.admin.indices.alias;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
@@ -33,11 +36,17 @@ import org.elasticsearch.index.query.QueryBuilder;
import java.io.IOException;
import java.util.Map;
+import java.util.Objects;
/**
* Represents an alias, to be associated with an index
*/
-public class Alias implements Streamable {
+public class Alias implements Streamable, ToXContentObject {
+
+ private static final ParseField FILTER = new ParseField("filter");
+ private static final ParseField ROUTING = new ParseField("routing");
+ private static final ParseField INDEX_ROUTING = new ParseField("index_routing", "indexRouting", "index-routing");
+ private static final ParseField SEARCH_ROUTING = new ParseField("search_routing", "searchRouting", "search-routing");
private String name;
@@ -196,16 +205,16 @@ public class Alias implements Streamable {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
- if ("filter".equals(currentFieldName)) {
+ if (FILTER.match(currentFieldName)) {
Map<String, Object> filter = parser.mapOrdered();
alias.filter(filter);
}
} else if (token == XContentParser.Token.VALUE_STRING) {
- if ("routing".equals(currentFieldName)) {
+ if (ROUTING.match(currentFieldName)) {
alias.routing(parser.text());
- } else if ("index_routing".equals(currentFieldName) || "indexRouting".equals(currentFieldName) || "index-routing".equals(currentFieldName)) {
+ } else if (INDEX_ROUTING.match(currentFieldName)) {
alias.indexRouting(parser.text());
- } else if ("search_routing".equals(currentFieldName) || "searchRouting".equals(currentFieldName) || "search-routing".equals(currentFieldName)) {
+ } else if (SEARCH_ROUTING.match(currentFieldName)) {
alias.searchRouting(parser.text());
}
}
@@ -213,6 +222,29 @@ public class Alias implements Streamable {
return alias;
}
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(name);
+
+ if (filter != null) {
+ builder.rawField(FILTER.getPreferredName(), new BytesArray(filter), XContentType.JSON);
+ }
+
+ if (indexRouting != null && indexRouting.equals(searchRouting)) {
+ builder.field(ROUTING.getPreferredName(), indexRouting);
+ } else {
+ if (indexRouting != null) {
+ builder.field(INDEX_ROUTING.getPreferredName(), indexRouting);
+ }
+ if (searchRouting != null) {
+ builder.field(SEARCH_ROUTING.getPreferredName(), searchRouting);
+ }
+ }
+
+ builder.endObject();
+ return builder;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
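
For reviewers who want to see the new serialization path in action, here is a minimal sketch against the classes in this diff (the printed JSON is the expected shape, not captured output):

    import org.elasticsearch.action.admin.indices.alias.Alias;
    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class AliasXContentDemo {
        public static void main(String[] args) throws Exception {
            Alias alias = new Alias("2017-logs")
                .filter("{\"term\":{\"year\":2017}}")
                .routing("1");
            // Alias.toXContent writes a named object, so it needs an enclosing
            // object, exactly as CreateIndexRequest provides under "aliases".
            XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
            alias.toXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.endObject();
            // With indexRouting equal to searchRouting both collapse into "routing":
            // {"2017-logs":{"filter":{"term":{"year":2017}},"routing":"1"}}
            System.out.println(builder.bytes().utf8ToString());
        }
    }
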
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java
index 2d320b094b2..f628974834c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java
@@ -30,6 +30,7 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.MapBuilder;
@@ -37,6 +38,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
@@ -65,7 +67,11 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
* @see org.elasticsearch.client.Requests#createIndexRequest(String)
* @see CreateIndexResponse
*/
-public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest> implements IndicesRequest {
+public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest> implements IndicesRequest, ToXContentObject {
+
+ private static final ParseField MAPPINGS = new ParseField("mappings");
+ private static final ParseField SETTINGS = new ParseField("settings");
+ private static final ParseField ALIASES = new ParseField("aliases");
private String cause = "";
@@ -376,14 +382,14 @@ public class CreateIndexRequest extends AcknowledgedRequest
public CreateIndexRequest source(Map<String, ?> source) {
for (Map.Entry<String, ?> entry : source.entrySet()) {
String name = entry.getKey();
- if (name.equals("settings")) {
+ if (SETTINGS.match(name)) {
settings((Map<String, Object>) entry.getValue());
- } else if (name.equals("mappings")) {
+ } else if (MAPPINGS.match(name)) {
Map<String, Object> mappings = (Map<String, Object>) entry.getValue();
for (Map.Entry<String, Object> entry1 : mappings.entrySet()) {
mapping(entry1.getKey(), (Map<String, Object>) entry1.getValue());
}
- } else if (name.equals("aliases")) {
+ } else if (ALIASES.match(name)) {
aliases((Map) entry.getValue());
} else {
// maybe custom?
@@ -520,4 +526,32 @@ public class CreateIndexRequest extends AcknowledgedRequest
out.writeBoolean(updateAllTypes);
waitForActiveShards.writeTo(out);
}
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+
+ builder.startObject(SETTINGS.getPreferredName());
+ settings.toXContent(builder, params);
+ builder.endObject();
+
+ builder.startObject(MAPPINGS.getPreferredName());
+ for (Map.Entry<String, String> entry : mappings.entrySet()) {
+ builder.rawField(entry.getKey(), new BytesArray(entry.getValue()), XContentType.JSON);
+ }
+ builder.endObject();
+
+ builder.startObject(ALIASES.getPreferredName());
+ for (Alias alias : aliases) {
+ alias.toXContent(builder, params);
+ }
+ builder.endObject();
+
+ for (Map.Entry<String, IndexMetaData.Custom> entry : customs.entrySet()) {
+ builder.field(entry.getKey(), entry.getValue(), params);
+ }
+
+ builder.endObject();
+ return builder;
+ }
}
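
The new CreateIndexRequest#toXContent mirrors the REST request body, which is what lets a client ship the request as JSON. A minimal sketch (index name, type and field are illustrative):

    import org.elasticsearch.action.admin.indices.alias.Alias;
    import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.common.xcontent.XContentType;

    public class CreateIndexRequestToXContentDemo {
        public static void main(String[] args) throws Exception {
            CreateIndexRequest request = new CreateIndexRequest("twitter")
                .settings(Settings.builder().put("index.number_of_shards", 3))
                .mapping("tweet", "{\"properties\":{\"message\":{\"type\":\"text\"}}}", XContentType.JSON)
                .alias(new Alias("tweets"));
            XContentBuilder builder = XContentFactory.jsonBuilder();
            request.toXContent(builder, ToXContent.EMPTY_PARAMS);
            // Expected shape: {"settings":{...},"mappings":{"tweet":{...}},"aliases":{"tweets":{}}}
            System.out.println(builder.bytes().utf8ToString());
        }
    }
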
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java
index b770c11c6ab..5c07b4024ee 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java
@@ -39,20 +39,17 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru
*/
public class CreateIndexResponse extends AcknowledgedResponse implements ToXContentObject {
- private static final String SHARDS_ACKNOWLEDGED = "shards_acknowledged";
- private static final String INDEX = "index";
-
- private static final ParseField SHARDS_ACKNOWLEDGED_PARSER = new ParseField(SHARDS_ACKNOWLEDGED);
- private static final ParseField INDEX_PARSER = new ParseField(INDEX);
+ private static final ParseField SHARDS_ACKNOWLEDGED = new ParseField("shards_acknowledged");
+ private static final ParseField INDEX = new ParseField("index");
private static final ConstructingObjectParser<CreateIndexResponse, Void> PARSER = new ConstructingObjectParser<>("create_index",
true, args -> new CreateIndexResponse((boolean) args[0], (boolean) args[1], (String) args[2]));
static {
declareAcknowledgedField(PARSER);
- PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED_PARSER,
+ PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED,
ObjectParser.ValueType.BOOLEAN);
- PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), INDEX_PARSER, ObjectParser.ValueType.STRING);
+ PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), INDEX, ObjectParser.ValueType.STRING);
}
private boolean shardsAcked;
@@ -102,8 +99,8 @@ public class CreateIndexResponse extends AcknowledgedResponse implements ToXCont
}
public void addCustomFields(XContentBuilder builder) throws IOException {
- builder.field(SHARDS_ACKNOWLEDGED, isShardsAcked());
- builder.field(INDEX, index());
+ builder.field(SHARDS_ACKNOWLEDGED.getPreferredName(), isShardsAcked());
+ builder.field(INDEX.getPreferredName(), index());
}
@Override
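
Since PARSER is lenient (`true`) and now keys off the ParseField constants directly, round-tripping a REST response is straightforward. A sketch, assuming the customary static fromXContent(XContentParser) wrapper around PARSER:

    import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.XContentType;

    public class CreateIndexResponseParseDemo {
        public static void main(String[] args) throws Exception {
            String json = "{\"acknowledged\":true,\"shards_acknowledged\":true,\"index\":\"twitter\"}";
            try (XContentParser parser = XContentType.JSON.xContent()
                    .createParser(NamedXContentRegistry.EMPTY, json)) {
                CreateIndexResponse response = CreateIndexResponse.fromXContent(parser);
                assert response.isAcknowledged();
                assert response.isShardsAcked();
                assert "twitter".equals(response.index());
            }
        }
    }
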
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java
index c9cf3257c76..b383c02be74 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java
@@ -36,9 +36,11 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
+import java.io.IOException;
import java.util.List;
/**
@@ -46,10 +48,15 @@ import java.util.List;
*/
public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndexRequest, GetIndexResponse> {
+ private final IndicesService indicesService;
+
@Inject
public TransportGetIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
- ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
- super(settings, GetIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexRequest::new, indexNameExpressionResolver);
+ ThreadPool threadPool, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService) {
+ super(settings, GetIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexRequest::new,
+ indexNameExpressionResolver);
+ this.indicesService = indicesService;
}
@Override
@@ -60,7 +67,8 @@ public class TransportGetIndexAction extends TransportClusterInfoAction
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java
+ Predicate<String> metadataFieldPredicate = indicesService::isMetaDataField;
+ Predicate<String> fieldPredicate = metadataFieldPredicate.or(indicesService.getFieldFilter().apply(shardId.getIndexName()));
+
Collection<String> typeIntersection;
if (request.types().length == 0) {
typeIntersection = indexService.mapperService().types();
@@ -104,16 +110,15 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
}
}
- MapBuilder<String, Map<String, FieldMappingMetaData>> typeMappings = new MapBuilder<>();
+ Map<String, Map<String, FieldMappingMetaData>> typeMappings = new HashMap<>();
for (String type : typeIntersection) {
DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
- Map<String, FieldMappingMetaData> fieldMapping = findFieldMappingsByType(documentMapper, request);
+ Map<String, FieldMappingMetaData> fieldMapping = findFieldMappingsByType(fieldPredicate, documentMapper, request);
if (!fieldMapping.isEmpty()) {
typeMappings.put(type, fieldMapping);
}
}
-
- return new GetFieldMappingsResponse(singletonMap(shardId.getIndexName(), typeMappings.immutableMap()));
+ return new GetFieldMappingsResponse(singletonMap(shardId.getIndexName(), Collections.unmodifiableMap(typeMappings)));
}
@Override
@@ -163,47 +168,50 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
}
};
- private Map<String, FieldMappingMetaData> findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) {
- MapBuilder<String, FieldMappingMetaData> fieldMappings = new MapBuilder<>();
+ private static Map<String, FieldMappingMetaData> findFieldMappingsByType(Predicate<String> fieldPredicate,
+ DocumentMapper documentMapper,
+ GetFieldMappingsIndexRequest request) {
+ Map<String, FieldMappingMetaData> fieldMappings = new HashMap<>();
final DocumentFieldMappers allFieldMappers = documentMapper.mappers();
for (String field : request.fields()) {
if (Regex.isMatchAllPattern(field)) {
for (FieldMapper fieldMapper : allFieldMappers) {
- addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
+ addFieldMapper(fieldPredicate, fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
}
} else if (Regex.isSimpleMatchPattern(field)) {
for (FieldMapper fieldMapper : allFieldMappers) {
if (Regex.simpleMatch(field, fieldMapper.fieldType().name())) {
- addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings,
- request.includeDefaults());
+ addFieldMapper(fieldPredicate, fieldMapper.fieldType().name(),
+ fieldMapper, fieldMappings, request.includeDefaults());
}
}
} else {
// not a pattern
FieldMapper fieldMapper = allFieldMappers.smartNameFieldMapper(field);
if (fieldMapper != null) {
- addFieldMapper(field, fieldMapper, fieldMappings, request.includeDefaults());
+ addFieldMapper(fieldPredicate, field, fieldMapper, fieldMappings, request.includeDefaults());
} else if (request.probablySingleFieldRequest()) {
fieldMappings.put(field, FieldMappingMetaData.NULL);
}
}
}
- return fieldMappings.immutableMap();
+ return Collections.unmodifiableMap(fieldMappings);
}
- private void addFieldMapper(String field, FieldMapper fieldMapper, MapBuilder<String, FieldMappingMetaData> fieldMappings, boolean includeDefaults) {
+ private static void addFieldMapper(Predicate<String> fieldPredicate,
+ String field, FieldMapper fieldMapper, Map<String, FieldMappingMetaData> fieldMappings,
+ boolean includeDefaults) {
if (fieldMappings.containsKey(field)) {
return;
}
- try {
- XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
- builder.startObject();
- fieldMapper.toXContent(builder, includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS);
- builder.endObject();
- fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.fieldType().name(), builder.bytes()));
- } catch (IOException e) {
- throw new ElasticsearchException("failed to serialize XContent of field [" + field + "]", e);
+ if (fieldPredicate.test(field)) {
+ try {
+ BytesReference bytes = XContentHelper.toXContent(fieldMapper, XContentType.JSON,
+ includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS, false);
+ fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.fieldType().name(), bytes));
+ } catch (IOException e) {
+ throw new ElasticsearchException("failed to serialize XContent of field [" + field + "]", e);
+ }
}
}
-
}
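
The interesting bit above is the Predicate composition: metadata fields are always visible, and everything else must pass the plugin-provided filter. The same semantics in a self-contained sketch (both predicates are invented stand-ins):

    import java.util.function.Predicate;

    public class FieldPredicateDemo {
        public static void main(String[] args) {
            // Stand-ins for indicesService::isMetaDataField and a plugin's field filter.
            Predicate<String> isMetaDataField = field -> field.startsWith("_");
            Predicate<String> pluginFilter = field -> field.startsWith("secret.") == false;
            Predicate<String> fieldPredicate = isMetaDataField.or(pluginFilter);
            System.out.println(fieldPredicate.test("_id"));          // true: metadata always passes
            System.out.println(fieldPredicate.test("user.name"));    // true: allowed by the filter
            System.out.println(fieldPredicate.test("secret.token")); // false: filtered out
        }
    }
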
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java
index 3189a5a15c2..8ad2ce5475f 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java
@@ -31,15 +31,23 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
+import java.io.IOException;
+
public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMappingsRequest, GetMappingsResponse> {
+ private final IndicesService indicesService;
+
@Inject
public TransportGetMappingsAction(Settings settings, TransportService transportService, ClusterService clusterService,
- ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
- super(settings, GetMappingsAction.NAME, transportService, clusterService, threadPool, actionFilters, GetMappingsRequest::new, indexNameExpressionResolver);
+ ThreadPool threadPool, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService) {
+ super(settings, GetMappingsAction.NAME, transportService, clusterService, threadPool, actionFilters, GetMappingsRequest::new,
+ indexNameExpressionResolver);
+ this.indicesService = indicesService;
}
@Override
@@ -50,7 +58,8 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction
- protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetMappingsResponse> listener) {
+ protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state,
+ final ActionListener<GetMappingsResponse> listener) {
logger.trace("serving getMapping request based on version {}", state.version());
- ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result = state.metaData().findMappings(
- concreteIndices, request.types()
- );
- listener.onResponse(new GetMappingsResponse(result));
+ try {
+ ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result =
+ state.metaData().findMappings(concreteIndices, request.types(), indicesService.getFieldFilter());
+ listener.onResponse(new GetMappingsResponse(result));
+ } catch (IOException e) {
+ listener.onFailure(e);
+ }
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java
index b9e6f56b6d7..b24dc685df6 100644
--- a/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java
@@ -40,6 +40,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
+import java.util.function.Predicate;
public class TransportFieldCapabilitiesIndexAction extends TransportSingleShardAction<FieldCapabilitiesIndexRequest, FieldCapabilitiesIndexResponse> {
@@ -77,12 +78,15 @@ public class TransportFieldCapabilitiesIndexAction extends TransportSingleShardA
for (String field : request.fields()) {
fieldNames.addAll(mapperService.simpleMatchToIndexNames(field));
}
+ Predicate<String> fieldPredicate = indicesService.getFieldFilter().apply(shardId.getIndexName());
Map<String, FieldCapabilities> responseMap = new HashMap<>();
for (String field : fieldNames) {
MappedFieldType ft = mapperService.fullName(field);
if (ft != null) {
FieldCapabilities fieldCap = new FieldCapabilities(field, ft.typeName(), ft.isSearchable(), ft.isAggregatable());
- responseMap.put(field, fieldCap);
+ if (indicesService.isMetaDataField(field) || fieldPredicate.test(field)) {
+ responseMap.put(field, fieldCap);
+ }
}
}
return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), responseMap);
diff --git a/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java
index 76f73bde4b6..7772b245658 100644
--- a/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java
@@ -23,20 +23,36 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.CheckedBiConsumer;
+import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue;
/**
* A multi search API request.
*/
public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest {
+ public static final int MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT = 0;
+
private int maxConcurrentSearchRequests = 0;
private List<SearchRequest> requests = new ArrayList<>();
@@ -131,4 +147,171 @@ public class MultiSearchRequest extends ActionRequest implements CompositeIndice
request.writeTo(out);
}
}
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ MultiSearchRequest that = (MultiSearchRequest) o;
+ return maxConcurrentSearchRequests == that.maxConcurrentSearchRequests &&
+ Objects.equals(requests, that.requests) &&
+ Objects.equals(indicesOptions, that.indicesOptions);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(maxConcurrentSearchRequests, requests, indicesOptions);
+ }
+
+ public static void readMultiLineFormat(BytesReference data,
+ XContent xContent,
+ CheckedBiConsumer<SearchRequest, XContentParser, IOException> consumer,
+ String[] indices,
+ IndicesOptions indicesOptions,
+ String[] types,
+ String routing,
+ String searchType,
+ NamedXContentRegistry registry,
+ boolean allowExplicitIndex) throws IOException {
+ int from = 0;
+ int length = data.length();
+ byte marker = xContent.streamSeparator();
+ while (true) {
+ int nextMarker = findNextMarker(marker, from, data, length);
+ if (nextMarker == -1) {
+ break;
+ }
+ // support first line with \n
+ if (nextMarker == 0) {
+ from = nextMarker + 1;
+ continue;
+ }
+
+ SearchRequest searchRequest = new SearchRequest();
+ if (indices != null) {
+ searchRequest.indices(indices);
+ }
+ if (indicesOptions != null) {
+ searchRequest.indicesOptions(indicesOptions);
+ }
+ if (types != null && types.length > 0) {
+ searchRequest.types(types);
+ }
+ if (routing != null) {
+ searchRequest.routing(routing);
+ }
+ if (searchType != null) {
+ searchRequest.searchType(searchType);
+ }
+ IndicesOptions defaultOptions = SearchRequest.DEFAULT_INDICES_OPTIONS;
+ // now parse the action
+ if (nextMarker - from > 0) {
+ try (XContentParser parser = xContent.createParser(registry, data.slice(from, nextMarker - from))) {
+ Map<String, Object> source = parser.map();
+ for (Map.Entry<String, Object> entry : source.entrySet()) {
+ Object value = entry.getValue();
+ if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) {
+ if (!allowExplicitIndex) {
+ throw new IllegalArgumentException("explicit index in multi search is not allowed");
+ }
+ searchRequest.indices(nodeStringArrayValue(value));
+ } else if ("type".equals(entry.getKey()) || "types".equals(entry.getKey())) {
+ searchRequest.types(nodeStringArrayValue(value));
+ } else if ("search_type".equals(entry.getKey()) || "searchType".equals(entry.getKey())) {
+ searchRequest.searchType(nodeStringValue(value, null));
+ } else if ("request_cache".equals(entry.getKey()) || "requestCache".equals(entry.getKey())) {
+ searchRequest.requestCache(nodeBooleanValue(value, entry.getKey()));
+ } else if ("preference".equals(entry.getKey())) {
+ searchRequest.preference(nodeStringValue(value, null));
+ } else if ("routing".equals(entry.getKey())) {
+ searchRequest.routing(nodeStringValue(value, null));
+ }
+ }
+ defaultOptions = IndicesOptions.fromMap(source, defaultOptions);
+ }
+ }
+ searchRequest.indicesOptions(defaultOptions);
+
+ // move pointers
+ from = nextMarker + 1;
+ // now for the body
+ nextMarker = findNextMarker(marker, from, data, length);
+ if (nextMarker == -1) {
+ break;
+ }
+ BytesReference bytes = data.slice(from, nextMarker - from);
+ try (XContentParser parser = xContent.createParser(registry, bytes)) {
+ consumer.accept(searchRequest, parser);
+ }
+ // move pointers
+ from = nextMarker + 1;
+ }
+ }
+
+ private static int findNextMarker(byte marker, int from, BytesReference data, int length) {
+ for (int i = from; i < length; i++) {
+ if (data.get(i) == marker) {
+ return i;
+ }
+ }
+ if (from != length) {
+ throw new IllegalArgumentException("The msearch request must be terminated by a newline [\n]");
+ }
+ return -1;
+ }
+
+ public static byte[] writeMultiLineFormat(MultiSearchRequest multiSearchRequest, XContent xContent) throws IOException {
+ ByteArrayOutputStream output = new ByteArrayOutputStream();
+ for (SearchRequest request : multiSearchRequest.requests()) {
+ try (XContentBuilder xContentBuilder = XContentBuilder.builder(xContent)) {
+ xContentBuilder.startObject();
+ if (request.indices() != null) {
+ xContentBuilder.field("index", request.indices());
+ }
+ if (request.indicesOptions() != null && request.indicesOptions() != SearchRequest.DEFAULT_INDICES_OPTIONS) {
+ if (request.indicesOptions().expandWildcardsOpen() && request.indicesOptions().expandWildcardsClosed()) {
+ xContentBuilder.field("expand_wildcards", "all");
+ } else if (request.indicesOptions().expandWildcardsOpen()) {
+ xContentBuilder.field("expand_wildcards", "open");
+ } else if (request.indicesOptions().expandWildcardsClosed()) {
+ xContentBuilder.field("expand_wildcards", "closed");
+ } else {
+ xContentBuilder.field("expand_wildcards", "none");
+ }
+ xContentBuilder.field("ignore_unavailable", request.indicesOptions().ignoreUnavailable());
+ xContentBuilder.field("allow_no_indices", request.indicesOptions().allowNoIndices());
+ }
+ if (request.types() != null) {
+ xContentBuilder.field("types", request.types());
+ }
+ if (request.searchType() != null) {
+ xContentBuilder.field("search_type", request.searchType().name().toLowerCase(Locale.ROOT));
+ }
+ if (request.requestCache() != null) {
+ xContentBuilder.field("request_cache", request.requestCache());
+ }
+ if (request.preference() != null) {
+ xContentBuilder.field("preference", request.preference());
+ }
+ if (request.routing() != null) {
+ xContentBuilder.field("routing", request.routing());
+ }
+ xContentBuilder.endObject();
+ xContentBuilder.bytes().writeTo(output);
+ }
+ output.write(xContent.streamSeparator());
+ try (XContentBuilder xContentBuilder = XContentBuilder.builder(xContent)) {
+ if (request.source() != null) {
+ request.source().toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
+ } else {
+ xContentBuilder.startObject();
+ xContentBuilder.endObject();
+ }
+ xContentBuilder.bytes().writeTo(output);
+ }
+ output.write(xContent.streamSeparator());
+ }
+ return output.toByteArray();
+ }
+
}
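
readMultiLineFormat/writeMultiLineFormat codify the msearch wire format: one header object and one body object per request, each line terminated by the xContent stream separator. A minimal sketch (the NDJSON in the comment approximates the output, it is not captured verbatim):

    import java.nio.charset.StandardCharsets;
    import org.elasticsearch.action.search.MultiSearchRequest;
    import org.elasticsearch.action.search.SearchRequest;
    import org.elasticsearch.common.xcontent.XContentType;

    public class MultiSearchWireFormatDemo {
        public static void main(String[] args) throws Exception {
            MultiSearchRequest multiRequest = new MultiSearchRequest();
            multiRequest.add(new SearchRequest("logs-2017"));
            multiRequest.add(new SearchRequest("metrics").preference("_local"));
            byte[] ndjson = MultiSearchRequest.writeMultiLineFormat(multiRequest, XContentType.JSON.xContent());
            // Roughly:
            // {"index":["logs-2017"],...}\n{}\n{"index":["metrics"],...,"preference":"_local"}\n{}\n
            System.out.print(new String(ndjson, StandardCharsets.UTF_8));
        }
    }
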
diff --git a/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java
index 560379a6ce2..cb30385ecc8 100644
--- a/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java
@@ -24,23 +24,39 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
/**
* A multi search response.
*/
public class MultiSearchResponse extends ActionResponse implements Iterable<MultiSearchResponse.Item>, ToXContentObject {
+ private static final ParseField RESPONSES = new ParseField(Fields.RESPONSES);
+ private static final ParseField TOOK_IN_MILLIS = new ParseField("took");
+ private static final ConstructingObjectParser<MultiSearchResponse, Void> PARSER = new ConstructingObjectParser<>("multi_search",
+ true, a -> new MultiSearchResponse(((List<Item>) a[0]).toArray(new Item[0]), (long) a[1]));
+ static {
+ PARSER.declareObjectArray(constructorArg(), (p, c) -> itemFromXContent(p), RESPONSES);
+ PARSER.declareLong(constructorArg(), TOOK_IN_MILLIS);
+ }
+
/**
* A search response item, holding the actual search response, or an error message if it failed.
*/
@@ -188,6 +204,45 @@ public class MultiSearchResponse extends ActionResponse implements Iterable
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
--- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
+ private final Function<Boolean, ReduceContext> reduceContextFunction;
- public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService) {
+ /**
+ * Constructor.
+ * @param settings Node settings
+ * @param reduceContextFunction A function that builds a context for the reduce of an {@link InternalAggregation}
+ */
+ public SearchPhaseController(Settings settings, Function<Boolean, ReduceContext> reduceContextFunction) {
super(settings);
- this.bigArrays = bigArrays;
- this.scriptService = scriptService;
+ this.reduceContextFunction = reduceContextFunction;
}
public AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) {
@@ -496,7 +500,7 @@ public final class SearchPhaseController extends AbstractComponent {
}
}
final Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions));
- ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, true);
+ ReduceContext reduceContext = reduceContextFunction.apply(true);
final InternalAggregations aggregations = aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
firstResult.pipelineAggregators(), reduceContext);
final SearchProfileShardResults shardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults);
@@ -513,7 +517,7 @@ public final class SearchPhaseController extends AbstractComponent {
* that relevant for the final reduce step. For final reduce see {@link #reduceAggs(List, List, ReduceContext)}
*/
private InternalAggregations reduceAggsIncrementally(List<InternalAggregations> aggregationsList) {
- ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, false);
+ ReduceContext reduceContext = reduceContextFunction.apply(false);
return aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
null, reduceContext);
}
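
The constructor change decouples SearchPhaseController from BigArrays and ScriptService: callers now inject a function that builds a ReduceContext on demand, with the boolean flag distinguishing the final reduce from incremental partial reduces. A sketch of the wiring this implies (the factory method is an assumption, not code from this diff):

    import java.util.function.Function;
    import org.elasticsearch.action.search.SearchPhaseController;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.script.ScriptService;
    import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;

    class SearchPhaseControllerWiring {
        static SearchPhaseController build(Settings settings, BigArrays bigArrays, ScriptService scriptService) {
            // true -> final reduce, false -> incremental reduce, matching the two call sites above
            Function<Boolean, ReduceContext> reduceContextFunction =
                isFinalReduce -> new ReduceContext(bigArrays, scriptService, isFinalReduce);
            return new SearchPhaseController(settings, reduceContextFunction);
        }
    }
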
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java
index 02d2e6a3429..a8bbd698918 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java
@@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestActions;
import org.elasticsearch.search.SearchHits;
@@ -242,9 +243,14 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
}
public static SearchResponse fromXContent(XContentParser parser) throws IOException {
- ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
- XContentParser.Token token;
- String currentFieldName = null;
+ ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
+ parser.nextToken();
+ return innerFromXContent(parser);
+ }
+
+ static SearchResponse innerFromXContent(XContentParser parser) throws IOException {
+ ensureExpectedToken(Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
+ String currentFieldName = parser.currentName();
SearchHits hits = null;
Aggregations aggs = null;
Suggest suggest = null;
@@ -259,8 +265,8 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
String scrollId = null;
List<ShardSearchFailure> failures = new ArrayList<>();
Clusters clusters = Clusters.EMPTY;
- while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
+ for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
+ if (token == Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (SCROLL_ID.match(currentFieldName)) {
@@ -276,7 +282,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
} else {
parser.skipChildren();
}
- } else if (token == XContentParser.Token.START_OBJECT) {
+ } else if (token == Token.START_OBJECT) {
if (SearchHits.Fields.HITS.equals(currentFieldName)) {
hits = SearchHits.fromXContent(parser);
} else if (Aggregations.AGGREGATIONS_FIELD.equals(currentFieldName)) {
@@ -286,8 +292,8 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
} else if (SearchProfileShardResults.PROFILE_FIELD.equals(currentFieldName)) {
profile = SearchProfileShardResults.fromXContent(parser);
} else if (RestActions._SHARDS_FIELD.match(currentFieldName)) {
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
+ while ((token = parser.nextToken()) != Token.END_OBJECT) {
+ if (token == Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (RestActions.FAILED_FIELD.match(currentFieldName)) {
@@ -301,9 +307,9 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
} else {
parser.skipChildren();
}
- } else if (token == XContentParser.Token.START_ARRAY) {
+ } else if (token == Token.START_ARRAY) {
if (RestActions.FAILURES_FIELD.match(currentFieldName)) {
- while((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ while((token = parser.nextToken()) != Token.END_ARRAY) {
failures.add(ShardSearchFailure.fromXContent(parser));
}
} else {
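
The split into fromXContent/innerFromXContent exists so that msearch items, which are already positioned inside an object, can reuse the inner parser. Standalone parsing still works as before; a sketch with a minimal response body:

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.XContentType;

    public class SearchResponseParseDemo {
        public static void main(String[] args) throws Exception {
            String json = "{\"took\":5,\"timed_out\":false,"
                + "\"_shards\":{\"total\":1,\"successful\":1,\"skipped\":0,\"failed\":0},"
                + "\"hits\":{\"total\":0,\"max_score\":null,\"hits\":[]}}";
            try (XContentParser parser = XContentType.JSON.xContent()
                    .createParser(NamedXContentRegistry.EMPTY, json)) {
                SearchResponse response = SearchResponse.fromXContent(parser);
                System.out.println(response.getTook());
            }
        }
    }
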
diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java
index 9dec3be5c1b..371314b990c 100644
--- a/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java
@@ -76,7 +76,7 @@ public class TransportMultiSearchAction extends HandledTransportAction
diff --git a/core/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java b/core/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java
--- a/core/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java
protected static <T extends AcknowledgedResponse> void declareAcknowledgedField(ConstructingObjectParser<T, Void> PARSER) {
- PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), ACKNOWLEDGED_PARSER,
+ PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), ACKNOWLEDGED,
ObjectParser.ValueType.BOOLEAN);
}
@@ -78,6 +77,6 @@ public abstract class AcknowledgedResponse extends ActionResponse {
}
protected void addAcknowledgedField(XContentBuilder builder) throws IOException {
- builder.field(ACKNOWLEDGED, isAcknowledged());
+ builder.field(ACKNOWLEDGED.getPreferredName(), isAcknowledged());
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
index 66f5a49f6d6..74233b5cec7 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
@@ -93,6 +93,9 @@ public class IndexTemplateMetaData extends AbstractDiffable
ImmutableOpenMap<String, CompressedXContent> mappings,
ImmutableOpenMap<String, AliasMetaData> aliases,
ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
+ if (patterns == null || patterns.isEmpty()) {
+ throw new IllegalArgumentException("Index patterns must not be null or empty; got " + patterns);
+ }
this.name = name;
this.order = order;
this.version = version;
@@ -244,7 +247,7 @@ public class IndexTemplateMetaData extends AbstractDiffable
- out.writeString(patterns.size() > 0 ? patterns.get(0) : "");
+ out.writeString(patterns.get(0));
}
Settings.writeSettingsToStream(settings, out);
out.writeVInt(mappings.size());
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java
index 83a06d9c4ca..9cbfb2ec71f 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java
@@ -107,15 +107,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
initMappers(withoutType);
}
- private MappingMetaData() {
- this.type = "";
- try {
- this.source = new CompressedXContent("{}");
- } catch (IOException ex) {
- throw new IllegalStateException("Cannot create MappingMetaData prototype", ex);
- }
- }
-
private void initMappers(Map<String, Object> withoutType) {
if (withoutType.containsKey("_routing")) {
boolean required = false;
@@ -143,13 +134,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
}
}
- public MappingMetaData(String type, CompressedXContent source, Routing routing, boolean hasParentField) {
- this.type = type;
- this.source = source;
- this.routing = routing;
- this.hasParentField = hasParentField;
- }
-
void updateDefaultMapping(MappingMetaData defaultMapping) {
if (routing == Routing.EMPTY) {
routing = defaultMapping.routing();
@@ -250,5 +234,4 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
public static Diff<MappingMetaData> readDiffFrom(StreamInput in) throws IOException {
return readDiffFrom(MappingMetaData::new, in);
}
-
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index c582f372e51..0e9bcf8f11a 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -48,11 +48,13 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
@@ -69,6 +71,8 @@ import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
+import java.util.function.Function;
+import java.util.function.Predicate;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
@@ -324,32 +328,38 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
return false;
}
- /*
- * Finds all mappings for types and concrete indices. Types are expanded to
- * include all types that match the glob patterns in the types array. Empty
- * types array, null or {"_all"} will be expanded to all types available for
- * the given indices.
+ /**
+ * Finds all mappings for types and concrete indices. Types are expanded to include all types that match the glob
+ * patterns in the types array. Empty types array, null or {"_all"} will be expanded to all types available for
+ * the given indices. Only fields that match the provided field filter will be returned (default is a predicate
+ * that always returns true, which can be overridden via plugins)
+ *
+ * @see MapperPlugin#getFieldFilter()
+ *
*/
- public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> findMappings(String[] concreteIndices, final String[] types) {
+ public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> findMappings(String[] concreteIndices,
+ final String[] types,
+ Function<String, Predicate<String>> fieldFilter)
+ throws IOException {
assert types != null;
assert concreteIndices != null;
if (concreteIndices.length == 0) {
return ImmutableOpenMap.of();
}
+ boolean isAllTypes = isAllTypes(types);
ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> indexMapBuilder = ImmutableOpenMap.builder();
Iterable<String> intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
for (String index : intersection) {
IndexMetaData indexMetaData = indices.get(index);
- ImmutableOpenMap.Builder<String, MappingMetaData> filteredMappings;
- if (isAllTypes(types)) {
- indexMapBuilder.put(index, indexMetaData.getMappings()); // No types specified means get it all
-
+ Predicate<String> fieldPredicate = fieldFilter.apply(index);
+ if (isAllTypes) {
+ indexMapBuilder.put(index, filterFields(indexMetaData.getMappings(), fieldPredicate));
} else {
- filteredMappings = ImmutableOpenMap.builder();
+ ImmutableOpenMap.Builder<String, MappingMetaData> filteredMappings = ImmutableOpenMap.builder();
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
if (Regex.simpleMatch(types, cursor.key)) {
- filteredMappings.put(cursor.key, cursor.value);
+ filteredMappings.put(cursor.key, filterFields(cursor.value, fieldPredicate));
}
}
if (!filteredMappings.isEmpty()) {
@@ -360,6 +370,95 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
return indexMapBuilder.build();
}
+ private static ImmutableOpenMap<String, MappingMetaData> filterFields(ImmutableOpenMap<String, MappingMetaData> mappings,
+ Predicate<String> fieldPredicate) throws IOException {
+ if (fieldPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) {
+ return mappings;
+ }
+ ImmutableOpenMap.Builder<String, MappingMetaData> builder = ImmutableOpenMap.builder(mappings.size());
+ for (ObjectObjectCursor<String, MappingMetaData> cursor : mappings) {
+ builder.put(cursor.key, filterFields(cursor.value, fieldPredicate));
+ }
+ return builder.build(); // No types specified means return them all
+ }
+
+ @SuppressWarnings("unchecked")
+ private static MappingMetaData filterFields(MappingMetaData mappingMetaData, Predicate<String> fieldPredicate) throws IOException {
+ if (fieldPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) {
+ return mappingMetaData;
+ }
+ Map<String, Object> sourceAsMap = XContentHelper.convertToMap(mappingMetaData.source().compressedReference(), true).v2();
+ Map<String, Object> mapping;
+ if (sourceAsMap.size() == 1 && sourceAsMap.containsKey(mappingMetaData.type())) {
+ mapping = (Map<String, Object>) sourceAsMap.get(mappingMetaData.type());
+ } else {
+ mapping = sourceAsMap;
+ }
+
+ Map<String, Object> properties = (Map<String, Object>) mapping.get("properties");
+ if (properties == null || properties.isEmpty()) {
+ return mappingMetaData;
+ }
+
+ filterFields("", properties, fieldPredicate);
+
+ return new MappingMetaData(mappingMetaData.type(), sourceAsMap);
+ }
+
+ @SuppressWarnings("unchecked")
+ private static boolean filterFields(String currentPath, Map<String, Object> fields, Predicate<String> fieldPredicate) {
+ assert fieldPredicate != MapperPlugin.NOOP_FIELD_PREDICATE;
+ Iterator<Map.Entry<String, Object>> entryIterator = fields.entrySet().iterator();
+ while (entryIterator.hasNext()) {
+ Map.Entry<String, Object> entry = entryIterator.next();
+ String newPath = mergePaths(currentPath, entry.getKey());
+ Object value = entry.getValue();
+ boolean mayRemove = true;
+ boolean isMultiField = false;
+ if (value instanceof Map) {
+ Map<String, Object> map = (Map<String, Object>) value;
+ Map<String, Object> properties = (Map<String, Object>) map.get("properties");
+ if (properties != null) {
+ mayRemove = filterFields(newPath, properties, fieldPredicate);
+ } else {
+ Map<String, Object> subFields = (Map<String, Object>) map.get("fields");
+ if (subFields != null) {
+ isMultiField = true;
+ if (mayRemove = filterFields(newPath, subFields, fieldPredicate)) {
+ map.remove("fields");
+ }
+ }
+ }
+ } else {
+ throw new IllegalStateException("cannot filter mappings, found unknown element of type [" + value.getClass() + "]");
+ }
+
+ //only remove a field if it has no sub-fields left and it has to be excluded
+ if (fieldPredicate.test(newPath) == false) {
+ if (mayRemove) {
+ entryIterator.remove();
+ } else if (isMultiField) {
+ //multi fields that should be excluded but hold subfields that don't have to be excluded are converted to objects
+ Map<String, Object> map = (Map<String, Object>) value;
+ Map<String, Object> subFields = (Map<String, Object>) map.get("fields");
+ assert subFields.size() > 0;
+ map.put("properties", subFields);
+ map.remove("fields");
+ map.remove("type");
+ }
+ }
+ }
+ //return true if the ancestor may be removed, as it has no sub-fields left
+ return fields.size() == 0;
+ }
+
+ private static String mergePaths(String path, String field) {
+ if (path.length() == 0) {
+ return field;
+ }
+ return path + "." + field;
+ }
+
/**
* Returns all the concrete indices.
*/
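
findMappings now threads a per-index field filter through every mapping it returns, and filterFields deliberately short-circuits on MapperPlugin.NOOP_FIELD_PREDICATE so the common case copies nothing. A sketch of a plugin supplying such a filter (plugin class, index prefix and field prefix are invented for illustration):

    import java.util.function.Function;
    import java.util.function.Predicate;
    import org.elasticsearch.plugins.MapperPlugin;
    import org.elasticsearch.plugins.Plugin;

    public class FieldFilterDemoPlugin extends Plugin implements MapperPlugin {
        @Override
        public Function<String, Predicate<String>> getFieldFilter() {
            // Hide "secret.*" fields on restricted indices; elsewhere return the
            // no-op predicate so MetaData.filterFields can skip filtering entirely.
            return index -> index.startsWith("restricted-")
                ? field -> field.startsWith("secret.") == false
                : MapperPlugin.NOOP_FIELD_PREDICATE;
        }
    }
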
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
index 2c0bc929294..59c38be50e8 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
@@ -38,6 +38,7 @@ import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
@@ -54,7 +55,6 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
-import java.util.function.Predicate;
import static org.elasticsearch.action.support.ContextPreservingActionListener.wrapPreservingContext;
@@ -164,13 +164,16 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
Settings.Builder settingsForOpenIndices = Settings.builder();
final Set skippedSettings = new HashSet<>();
- indexScopedSettings.validate(normalizedSettings, false); // don't validate dependencies here we check it below
- // never allow to change the number of shards
+ indexScopedSettings.validate(normalizedSettings.filter(s -> Regex.isSimpleMatchPattern(s) == false /* don't validate wildcards */),
+ false); //don't validate dependencies here we check it below never allow to change the number of shards
for (String key : normalizedSettings.keySet()) {
Setting<?> setting = indexScopedSettings.get(key);
- assert setting != null; // we already validated the normalized settings
+ boolean isWildcard = setting == null && Regex.isSimpleMatchPattern(key);
+ assert setting != null // we already validated the normalized settings
+ || (isWildcard && normalizedSettings.hasValue(key) == false)
+ : "unknown setting: " + key + " isWildcard: " + isWildcard + " hasValue: " + normalizedSettings.hasValue(key);
settingsForClosedIndices.copy(key, normalizedSettings);
- if (setting.isDynamic()) {
+ if (isWildcard || setting.isDynamic()) {
settingsForOpenIndices.copy(key, normalizedSettings);
} else {
skippedSettings.add(key);
diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
index 13c2e50eba2..9914ee2577a 100644
--- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
@@ -133,8 +133,11 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
Objects.requireNonNull(state.get(), "please set initial state before starting");
addListener(localNodeMasterListeners);
- threadPoolExecutor = EsExecutors.newSinglePrioritizing(CLUSTER_UPDATE_THREAD_NAME,
- daemonThreadFactory(settings, CLUSTER_UPDATE_THREAD_NAME), threadPool.getThreadContext(), threadPool.scheduler());
+ threadPoolExecutor = EsExecutors.newSinglePrioritizing(
+ nodeName() + "/" + CLUSTER_UPDATE_THREAD_NAME,
+ daemonThreadFactory(settings, CLUSTER_UPDATE_THREAD_NAME),
+ threadPool.getThreadContext(),
+ threadPool.scheduler());
}
class UpdateTask extends SourcePrioritizedRunnable implements Function<ClusterState, ClusterState> {
diff --git a/core/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/core/src/main/java/org/elasticsearch/cluster/service/MasterService.java
index a5f71dc48b8..6858866d2dc 100644
--- a/core/src/main/java/org/elasticsearch/cluster/service/MasterService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/service/MasterService.java
@@ -104,8 +104,11 @@ public class MasterService extends AbstractLifecycleComponent {
protected synchronized void doStart() {
Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
Objects.requireNonNull(clusterStateSupplier, "please set a cluster state supplier before starting");
- threadPoolExecutor = EsExecutors.newSinglePrioritizing(MASTER_UPDATE_THREAD_NAME,
- daemonThreadFactory(settings, MASTER_UPDATE_THREAD_NAME), threadPool.getThreadContext(), threadPool.scheduler());
+ threadPoolExecutor = EsExecutors.newSinglePrioritizing(
+ nodeName() + "/" + MASTER_UPDATE_THREAD_NAME,
+ daemonThreadFactory(settings, MASTER_UPDATE_THREAD_NAME),
+ threadPool.getThreadContext(),
+ threadPool.scheduler());
taskBatcher = new Batcher(logger, threadPoolExecutor);
}
diff --git a/core/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java b/core/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java
new file mode 100644
index 00000000000..fbdcdfd6885
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.bytes;
+
+import org.apache.lucene.util.BytesRef;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This is a {@link BytesReference} backed by a {@link ByteBuffer}. The byte buffer can either be a heap or
+ * direct byte buffer. The reference is composed of the space between the {@link ByteBuffer#position} and
+ * {@link ByteBuffer#limit} at construction time. If the position or limit of the underlying byte buffer is
+ * changed, those changes will not be reflected in this reference. However, modifying the limit or position
+ * of the underlying byte buffer is not recommended as those can be used during {@link ByteBuffer#get()}
+ * bounds checks. Use {@link ByteBuffer#duplicate()} at creation time if you plan on modifying the markers of
+ * the underlying byte buffer. Any changes to the underlying data in the byte buffer will be reflected.
+ */
+public class ByteBufferReference extends BytesReference {
+
+ private final ByteBuffer buffer;
+ private final int offset;
+ private final int length;
+
+ public ByteBufferReference(ByteBuffer buffer) {
+ this.buffer = buffer;
+ this.offset = buffer.position();
+ this.length = buffer.remaining();
+ }
+
+ @Override
+ public byte get(int index) {
+ return buffer.get(index + offset);
+ }
+
+ @Override
+ public int length() {
+ return length;
+ }
+
+ @Override
+ public BytesReference slice(int from, int length) {
+ if (from < 0 || (from + length) > this.length) {
+ throw new IndexOutOfBoundsException("can't slice a buffer with length [" + this.length + "], with slice parameters from ["
+ + from + "], length [" + length + "]");
+ }
+ ByteBuffer newByteBuffer = buffer.duplicate();
+ newByteBuffer.position(offset + from);
+ newByteBuffer.limit(offset + from + length);
+ return new ByteBufferReference(newByteBuffer);
+ }
+
+ /**
+ * This will return a bytes ref composed of the bytes. If this is a direct byte buffer, the bytes will
+ * have to be copied.
+ *
+ * @return the bytes ref
+ */
+ @Override
+ public BytesRef toBytesRef() {
+ if (buffer.hasArray()) {
+ return new BytesRef(buffer.array(), buffer.arrayOffset() + offset, length);
+ }
+ final byte[] copy = new byte[length];
+ // ByteBuffer.get(byte[], int, int) offsets into the destination array, not the
+ // buffer, so read through a duplicate positioned at this reference's offset.
+ ByteBuffer dup = buffer.duplicate();
+ dup.position(offset);
+ dup.get(copy, 0, length);
+ return new BytesRef(copy);
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return buffer.capacity();
+ }
+}
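
Usage sketch for the new reference type, showing the position/limit window and zero-copy slicing (pure JDK plus the class above):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.common.bytes.ByteBufferReference;
    import org.elasticsearch.common.bytes.BytesReference;

    public class ByteBufferReferenceDemo {
        public static void main(String[] args) {
            ByteBuffer buffer = ByteBuffer.wrap("hello world".getBytes(StandardCharsets.UTF_8));
            buffer.position(6); // the reference spans position..limit, i.e. "world"
            BytesReference ref = new ByteBufferReference(buffer.duplicate()); // duplicate keeps our markers safe
            System.out.println(ref.length());              // 5
            System.out.println((char) ref.get(0));         // 'w'
            BytesRef slice = ref.slice(1, 3).toBytesRef(); // backed by the same array
            System.out.println(slice.utf8ToString());      // "orl"
        }
    }
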
diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java b/core/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java
index f8030296940..9eb1fa9a3f4 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java
@@ -241,6 +241,11 @@ public enum GeoShapeType {
}
return coordinates;
}
+
+ @Override
+ public String wktName() {
+ return BBOX;
+ }
},
CIRCLE("circle") {
@Override
@@ -273,11 +278,13 @@ public enum GeoShapeType {
private final String shapename;
private static Map<String, GeoShapeType> shapeTypeMap = new HashMap<>();
+ private static final String BBOX = "BBOX";
static {
for (GeoShapeType type : values()) {
shapeTypeMap.put(type.shapename, type);
}
+ shapeTypeMap.put(ENVELOPE.wktName().toLowerCase(Locale.ROOT), ENVELOPE);
}
GeoShapeType(String shapename) {
@@ -300,6 +307,11 @@ public enum GeoShapeType {
ShapeBuilder.Orientation orientation, boolean coerce);
abstract CoordinateNode validate(CoordinateNode coordinates, boolean coerce);
+ /** wkt shape name */
+ public String wktName() {
+ return this.shapename;
+ }
+
public static List<Entry> getShapeWriteables() {
List<Entry> namedWriteables = new ArrayList<>();
namedWriteables.add(new Entry(ShapeBuilder.class, PointBuilder.TYPE.shapeName(), PointBuilder::new));
@@ -313,4 +325,9 @@ public enum GeoShapeType {
namedWriteables.add(new Entry(ShapeBuilder.class, GeometryCollectionBuilder.TYPE.shapeName(), GeometryCollectionBuilder::new));
return namedWriteables;
}
+
+ @Override
+ public String toString() {
+ return this.shapename;
+ }
}
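
wktName() exists because GeoJSON and WKT disagree on one spelling: an envelope is BBOX in WKT. A tiny sketch of the distinction:

    import org.elasticsearch.common.geo.GeoShapeType;

    public class WktNameDemo {
        public static void main(String[] args) {
            System.out.println(GeoShapeType.POLYGON.wktName());   // polygon (same as the GeoJSON name)
            System.out.println(GeoShapeType.ENVELOPE.wktName());  // BBOX (WKT spelling, also registered as a lookup alias)
            System.out.println(GeoShapeType.ENVELOPE.toString()); // envelope
        }
    }
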
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java
index 108e66d9150..ecc33b94ae4 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java
@@ -168,6 +168,11 @@ public class CircleBuilder extends ShapeBuilder {
return TYPE;
}
+ @Override
+ public String toWKT() {
+ throw new UnsupportedOperationException("The WKT spec does not support CIRCLE geometry");
+ }
+
@Override
public int hashCode() {
return Objects.hash(center, radius, unit.ordinal());
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
index b352aa1d924..4949c363347 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
@@ -20,6 +20,7 @@
package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.geo.GeoShapeType;
+import org.elasticsearch.common.geo.parsers.GeoWKTParser;
import org.elasticsearch.common.geo.parsers.ShapeParser;
import org.locationtech.spatial4j.shape.Rectangle;
import com.vividsolutions.jts.geom.Coordinate;
@@ -70,6 +71,28 @@ public class EnvelopeBuilder extends ShapeBuilder {
return this.bottomRight;
}
+ @Override
+ protected StringBuilder contentToWKT() {
+ StringBuilder sb = new StringBuilder();
+
+ sb.append(GeoWKTParser.LPAREN);
+ // minX, maxX, maxY, minY
+ sb.append(topLeft.x);
+ sb.append(GeoWKTParser.COMMA);
+ sb.append(GeoWKTParser.SPACE);
+ sb.append(bottomRight.x);
+ sb.append(GeoWKTParser.COMMA);
+ sb.append(GeoWKTParser.SPACE);
+ // TODO support Z??
+ sb.append(topLeft.y);
+ sb.append(GeoWKTParser.COMMA);
+ sb.append(GeoWKTParser.SPACE);
+ sb.append(bottomRight.y);
+ sb.append(GeoWKTParser.RPAREN);
+
+ return sb;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
index 3ea422265a7..84052939da4 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
@@ -21,6 +21,7 @@ package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.geo.GeoShapeType;
import org.elasticsearch.common.geo.parsers.ShapeParser;
+import org.elasticsearch.common.geo.parsers.GeoWKTParser;
import org.locationtech.spatial4j.shape.Shape;
import org.elasticsearch.ElasticsearchException;
@@ -136,6 +137,23 @@ public class GeometryCollectionBuilder extends ShapeBuilder {
return builder.endObject();
}
+ @Override
+ protected StringBuilder contentToWKT() {
+ StringBuilder sb = new StringBuilder();
+ if (shapes.isEmpty()) {
+ sb.append(GeoWKTParser.EMPTY);
+ } else {
+ sb.append(GeoWKTParser.LPAREN);
+ sb.append(shapes.get(0).toWKT());
+ for (int i = 1; i < shapes.size(); ++i) {
+ sb.append(GeoWKTParser.COMMA);
+ sb.append(shapes.get(i).toWKT());
+ }
+ sb.append(GeoWKTParser.RPAREN);
+ }
+ return sb;
+ }
+
@Override
public GeoShapeType type() {
return TYPE;
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
index 1a4f71da2d4..34a8960f69c 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
@@ -20,8 +20,8 @@
package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.geo.GeoShapeType;
+import org.elasticsearch.common.geo.parsers.GeoWKTParser;
import org.elasticsearch.common.geo.parsers.ShapeParser;
-import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.LineString;
@@ -82,6 +82,25 @@ public class MultiLineStringBuilder extends ShapeBuilder {
+ @Override
+ protected StringBuilder contentToWKT() {
+ final StringBuilder sb = new StringBuilder();
+ if (lines.isEmpty()) {
+ sb.append(GeoWKTParser.EMPTY);
+ } else {
+ sb.append(GeoWKTParser.LPAREN);
+ if (lines.size() > 0) {
+ sb.append(ShapeBuilder.coordinateListToWKT(lines.get(0).coordinates));
+ }
+ for (int i = 1; i < lines.size(); ++i) {
+ sb.append(GeoWKTParser.COMMA);
+ sb.append(ShapeBuilder.coordinateListToWKT(lines.get(i).coordinates));
+ }
+ sb.append(GeoWKTParser.RPAREN);
+ }
+ return sb;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
index 3c002631b8d..aa577887e00 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
@@ -21,6 +21,7 @@ package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.geo.GeoShapeType;
import org.elasticsearch.common.geo.parsers.ShapeParser;
+import org.elasticsearch.common.geo.parsers.GeoWKTParser;
import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate;
@@ -101,6 +102,37 @@ public class MultiPolygonBuilder extends ShapeBuilder {
return polygons;
}
+ private static String polygonCoordinatesToWKT(PolygonBuilder polygon) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(GeoWKTParser.LPAREN);
+ sb.append(ShapeBuilder.coordinateListToWKT(polygon.shell().coordinates));
+ for (LineStringBuilder hole : polygon.holes()) {
+ sb.append(GeoWKTParser.COMMA);
+ sb.append(ShapeBuilder.coordinateListToWKT(hole.coordinates));
+ }
+ sb.append(GeoWKTParser.RPAREN);
+ return sb.toString();
+ }
+
+ @Override
+ protected StringBuilder contentToWKT() {
+ final StringBuilder sb = new StringBuilder();
+ if (polygons.isEmpty()) {
+ sb.append(GeoWKTParser.EMPTY);
+ } else {
+ sb.append(GeoWKTParser.LPAREN);
+ if (polygons.size() > 0) {
+ sb.append(polygonCoordinatesToWKT(polygons.get(0)));
+ }
+ for (int i = 1; i < polygons.size(); ++i) {
+ sb.append(GeoWKTParser.COMMA);
+ sb.append(polygonCoordinatesToWKT(polygons.get(i)));
+ }
+ sb.append(GeoWKTParser.RPAREN);
+ }
+ return sb;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
index 919aae37c73..ffcb44c9e46 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
@@ -729,6 +729,19 @@ public class PolygonBuilder extends ShapeBuilder {
}
}
+ @Override
+ protected StringBuilder contentToWKT() {
+ StringBuilder sb = new StringBuilder();
+ sb.append('(');
+ sb.append(ShapeBuilder.coordinateListToWKT(shell.coordinates));
+ for (LineStringBuilder hole : holes) {
+ sb.append(", ");
+ sb.append(ShapeBuilder.coordinateListToWKT(hole.coordinates));
+ }
+ sb.append(')');
+ return sb;
+ }
+
@Override
public int hashCode() {
return Objects.hash(shell, holes, orientation);
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
index ef50a667faa..106c312a3bc 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
@@ -27,6 +27,7 @@ import org.apache.logging.log4j.Logger;
import org.elasticsearch.Assertions;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.geo.GeoShapeType;
+import org.elasticsearch.common.geo.parsers.GeoWKTParser;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -339,6 +340,47 @@ public abstract class ShapeBuilder
}
}
+ protected StringBuilder contentToWKT() {
+ return coordinateListToWKT(this.coordinates);
+ }
+
+ public String toWKT() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(type().wktName());
+ sb.append(GeoWKTParser.SPACE);
+ sb.append(contentToWKT());
+ return sb.toString();
+ }
+
+ protected static StringBuilder coordinateListToWKT(final List<Coordinate> coordinates) {
+ final StringBuilder sb = new StringBuilder();
+
+ if (coordinates.isEmpty()) {
+ sb.append(GeoWKTParser.EMPTY);
+ } else {
+ // walk through coordinates:
+ sb.append(GeoWKTParser.LPAREN);
+ sb.append(coordinateToWKT(coordinates.get(0)));
+ for (int i = 1; i < coordinates.size(); ++i) {
+ sb.append(GeoWKTParser.COMMA);
+ sb.append(GeoWKTParser.SPACE);
+ sb.append(coordinateToWKT(coordinates.get(i)));
+ }
+ sb.append(GeoWKTParser.RPAREN);
+ }
+
+ return sb;
+ }
+
+ private static String coordinateToWKT(final Coordinate coordinate) {
+ final StringBuilder sb = new StringBuilder();
+ sb.append(coordinate.x + GeoWKTParser.SPACE + coordinate.y);
+ if (Double.isNaN(coordinate.z) == false) {
+ sb.append(GeoWKTParser.SPACE + coordinate.z);
+ }
+ return sb.toString();
+ }
+
protected static final IntersectionOrder INTERSECTION_ORDER = new IntersectionOrder();
private static final class IntersectionOrder implements Comparator<Edge> {
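Reviewer note: toWKT() and contentToWKT() form a small template method, with toWKT() supplying the "TYPE body" framing and subclasses overriding only the body. A sketch with a line string, using builder APIs that appear elsewhere in this diff and assuming wktName() is the upper-case type keyword:

import org.elasticsearch.common.geo.builders.CoordinatesBuilder;
import org.elasticsearch.common.geo.builders.LineStringBuilder;

public class LineWktDemo {
    public static void main(String[] args) {
        LineStringBuilder line = new LineStringBuilder(new CoordinatesBuilder()
            .coordinate(3.0, 1.0)
            .coordinate(4.0, 2.0));
        // coordinateListToWKT renders "(3.0 1.0, 4.0 2.0)"; toWKT() prepends the type:
        System.out.println(line.toWKT()); // expected: "LINESTRING (3.0 1.0, 4.0 2.0)"
    }
}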
diff --git a/core/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/core/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java
new file mode 100644
index 00000000000..005caed53a7
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java
@@ -0,0 +1,321 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.geo.parsers;
+
+import com.vividsolutions.jts.geom.Coordinate;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.geo.GeoShapeType;
+
+import org.elasticsearch.common.geo.builders.CoordinatesBuilder;
+import org.elasticsearch.common.geo.builders.EnvelopeBuilder;
+import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder;
+import org.elasticsearch.common.geo.builders.LineStringBuilder;
+import org.elasticsearch.common.geo.builders.MultiLineStringBuilder;
+import org.elasticsearch.common.geo.builders.MultiPointBuilder;
+import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
+import org.elasticsearch.common.geo.builders.PointBuilder;
+import org.elasticsearch.common.geo.builders.PolygonBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.io.FastStringReader;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.io.StreamTokenizer;
+import java.util.List;
+
+/**
+ * Parses shape geometry represented in WKT format
+ *
+ * complies with OGC® document: 12-063r5 and ISO/IEC 13249-3:2016 standard
+ * located at http://docs.opengeospatial.org/is/12-063r5/12-063r5.html
+ */
+public class GeoWKTParser {
+ public static final String EMPTY = "EMPTY";
+ public static final String SPACE = Loggers.SPACE;
+ public static final String LPAREN = "(";
+ public static final String RPAREN = ")";
+ public static final String COMMA = ",";
+ private static final String NAN = "NaN";
+
+ private static final String NUMBER = "<NUMBER>";
+ private static final String EOF = "END-OF-STREAM";
+ private static final String EOL = "END-OF-LINE";
+
+ // no instance
+ private GeoWKTParser() {}
+
+ public static ShapeBuilder parse(XContentParser parser)
+ throws IOException, ElasticsearchParseException {
+ FastStringReader reader = new FastStringReader(parser.text());
+ try {
+ // setup the tokenizer; configured to read words w/o numbers
+ StreamTokenizer tokenizer = new StreamTokenizer(reader);
+ tokenizer.resetSyntax();
+ tokenizer.wordChars('a', 'z');
+ tokenizer.wordChars('A', 'Z');
+ tokenizer.wordChars(128 + 32, 255);
+ tokenizer.wordChars('0', '9');
+ tokenizer.wordChars('-', '-');
+ tokenizer.wordChars('+', '+');
+ tokenizer.wordChars('.', '.');
+ tokenizer.whitespaceChars(0, ' ');
+ tokenizer.commentChar('#');
+ ShapeBuilder builder = parseGeometry(tokenizer);
+ checkEOF(tokenizer);
+ return builder;
+ } finally {
+ reader.close();
+ }
+ }
+
+ /** parse geometry from the stream tokenizer */
+ private static ShapeBuilder parseGeometry(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ final GeoShapeType type = GeoShapeType.forName(nextWord(stream));
+ switch (type) {
+ case POINT:
+ return parsePoint(stream);
+ case MULTIPOINT:
+ return parseMultiPoint(stream);
+ case LINESTRING:
+ return parseLine(stream);
+ case MULTILINESTRING:
+ return parseMultiLine(stream);
+ case POLYGON:
+ return parsePolygon(stream);
+ case MULTIPOLYGON:
+ return parseMultiPolygon(stream);
+ case ENVELOPE:
+ return parseBBox(stream);
+ case GEOMETRYCOLLECTION:
+ return parseGeometryCollection(stream);
+ default:
+ throw new IllegalArgumentException("Unknown geometry type: " + type);
+ }
+ }
+
+ private static EnvelopeBuilder parseBBox(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ if (nextEmptyOrOpen(stream).equals(EMPTY)) {
+ return null;
+ }
+ double minLon = nextNumber(stream);
+ nextComma(stream);
+ double maxLon = nextNumber(stream);
+ nextComma(stream);
+ double maxLat = nextNumber(stream);
+ nextComma(stream);
+ double minLat = nextNumber(stream);
+ nextCloser(stream);
+ return new EnvelopeBuilder(new Coordinate(minLon, maxLat), new Coordinate(maxLon, minLat));
+ }
+
+ private static PointBuilder parsePoint(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ if (nextEmptyOrOpen(stream).equals(EMPTY)) {
+ return null;
+ }
+ PointBuilder pt = new PointBuilder(nextNumber(stream), nextNumber(stream));
+ if (isNumberNext(stream) == true) {
+ nextNumber(stream);
+ }
+ nextCloser(stream);
+ return pt;
+ }
+
+ private static List<Coordinate> parseCoordinateList(StreamTokenizer stream)
+ throws IOException, ElasticsearchParseException {
+ CoordinatesBuilder coordinates = new CoordinatesBuilder();
+ boolean isOpenParen = false;
+ if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) {
+ coordinates.coordinate(parseCoordinate(stream));
+ }
+
+ if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) {
+ throw new ElasticsearchParseException("expected: [{}]" + RPAREN + " but found: [{}]" + tokenString(stream), stream.lineno());
+ }
+
+ while (nextCloserOrComma(stream).equals(COMMA)) {
+ isOpenParen = false;
+ if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) {
+ coordinates.coordinate(parseCoordinate(stream));
+ }
+ if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) {
+ throw new ElasticsearchParseException("expected: " + RPAREN + " but found: " + tokenString(stream), stream.lineno());
+ }
+ }
+ return coordinates.build();
+ }
+
+ private static Coordinate parseCoordinate(StreamTokenizer stream)
+ throws IOException, ElasticsearchParseException {
+ final double lon = nextNumber(stream);
+ final double lat = nextNumber(stream);
+ Double z = null;
+ if (isNumberNext(stream)) {
+ z = nextNumber(stream);
+ }
+ return z == null ? new Coordinate(lon, lat) : new Coordinate(lon, lat, z);
+ }
+
+ private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ String token = nextEmptyOrOpen(stream);
+ if (token.equals(EMPTY)) {
+ return null;
+ }
+ return new MultiPointBuilder(parseCoordinateList(stream));
+ }
+
+ private static LineStringBuilder parseLine(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ String token = nextEmptyOrOpen(stream);
+ if (token.equals(EMPTY)) {
+ return null;
+ }
+ return new LineStringBuilder(parseCoordinateList(stream));
+ }
+
+ private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ String token = nextEmptyOrOpen(stream);
+ if (token.equals(EMPTY)) {
+ return null;
+ }
+ MultiLineStringBuilder builder = new MultiLineStringBuilder();
+ builder.linestring(parseLine(stream));
+ while (nextCloserOrComma(stream).equals(COMMA)) {
+ builder.linestring(parseLine(stream));
+ }
+ return builder;
+ }
+
+ private static PolygonBuilder parsePolygon(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ if (nextEmptyOrOpen(stream).equals(EMPTY)) {
+ return null;
+ }
+ PolygonBuilder builder = new PolygonBuilder(parseLine(stream), ShapeBuilder.Orientation.RIGHT);
+ while (nextCloserOrComma(stream).equals(COMMA)) {
+ builder.hole(parseLine(stream));
+ }
+ return builder;
+ }
+
+ private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ if (nextEmptyOrOpen(stream).equals(EMPTY)) {
+ return null;
+ }
+ MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream));
+ while (nextCloserOrComma(stream).equals(COMMA)) {
+ builder.polygon(parsePolygon(stream));
+ }
+ return builder;
+ }
+
+ private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream)
+ throws IOException, ElasticsearchParseException {
+ if (nextEmptyOrOpen(stream).equals(EMPTY)) {
+ return null;
+ }
+ GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape(parseGeometry(stream));
+ while (nextCloserOrComma(stream).equals(COMMA)) {
+ builder.shape(parseGeometry(stream));
+ }
+ return builder;
+ }
+
+ /** next word in the stream */
+ private static String nextWord(StreamTokenizer stream) throws ElasticsearchParseException, IOException {
+ switch (stream.nextToken()) {
+ case StreamTokenizer.TT_WORD:
+ final String word = stream.sval;
+ return word.equalsIgnoreCase(EMPTY) ? EMPTY : word;
+ case '(': return LPAREN;
+ case ')': return RPAREN;
+ case ',': return COMMA;
+ }
+ throw new ElasticsearchParseException("expected word but found: " + tokenString(stream), stream.lineno());
+ }
+
+ private static double nextNumber(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ if (stream.nextToken() == StreamTokenizer.TT_WORD) {
+ if (stream.sval.equalsIgnoreCase(NAN)) {
+ return Double.NaN;
+ } else {
+ try {
+ return Double.parseDouble(stream.sval);
+ } catch (NumberFormatException e) {
+ throw new ElasticsearchParseException("invalid number found: " + stream.sval, stream.lineno());
+ }
+ }
+ }
+ throw new ElasticsearchParseException("expected number but found: " + tokenString(stream), stream.lineno());
+ }
+
+ private static String tokenString(StreamTokenizer stream) {
+ switch (stream.ttype) {
+ case StreamTokenizer.TT_WORD: return stream.sval;
+ case StreamTokenizer.TT_EOF: return EOF;
+ case StreamTokenizer.TT_EOL: return EOL;
+ case StreamTokenizer.TT_NUMBER: return NUMBER;
+ }
+ return "'" + (char) stream.ttype + "'";
+ }
+
+ private static boolean isNumberNext(StreamTokenizer stream) throws IOException {
+ final int type = stream.nextToken();
+ stream.pushBack();
+ return type == StreamTokenizer.TT_WORD;
+ }
+
+ private static String nextEmptyOrOpen(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ final String next = nextWord(stream);
+ if (next.equals(EMPTY) || next.equals(LPAREN)) {
+ return next;
+ }
+ throw new ElasticsearchParseException("expected " + EMPTY + " or " + LPAREN
+ + " but found: " + tokenString(stream), stream.lineno());
+ }
+
+ private static String nextCloser(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ if (nextWord(stream).equals(RPAREN)) {
+ return RPAREN;
+ }
+ throw new ElasticsearchParseException("expected " + RPAREN + " but found: " + tokenString(stream), stream.lineno());
+ }
+
+ private static String nextComma(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ if (nextWord(stream).equals(COMMA) == true) {
+ return COMMA;
+ }
+ throw new ElasticsearchParseException("expected " + COMMA + " but found: " + tokenString(stream), stream.lineno());
+ }
+
+ private static String nextCloserOrComma(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
+ String token = nextWord(stream);
+ if (token.equals(COMMA) || token.equals(RPAREN)) {
+ return token;
+ }
+ throw new ElasticsearchParseException("expected " + COMMA + " or " + RPAREN
+ + " but found: " + tokenString(stream), stream.lineno());
+ }
+
+ /** verify that the stream is fully consumed, i.e., the next token is end-of-file */
+ private static void checkEOF(StreamTokenizer stream) throws ElasticsearchParseException, IOException {
+ if (stream.nextToken() != StreamTokenizer.TT_EOF) {
+ throw new ElasticsearchParseException("expected end of WKT string but found additional text: "
+ + tokenString(stream), stream.lineno());
+ }
+ }
+}
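Reviewer note: the tokenizer setup in parse() is the heart of this parser: digits, signs, and dots are declared word characters, so numbers arrive as TT_WORD strings and nextNumber() converts them (including "NaN") itself. A standalone JDK-only sketch of that configuration (the extended-ASCII wordChars and commentChar lines are omitted for brevity):

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class WktTokenizerDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer t = new StreamTokenizer(new StringReader("POINT (-77.0 38.9)"));
        t.resetSyntax();                 // start from a blank syntax table
        t.wordChars('a', 'z');
        t.wordChars('A', 'Z');
        t.wordChars('0', '9');           // digits are word chars, not numbers
        t.wordChars('-', '-');
        t.wordChars('+', '+');
        t.wordChars('.', '.');
        t.whitespaceChars(0, ' ');
        while (t.nextToken() != StreamTokenizer.TT_EOF) {
            // prints: POINT, (, -77.0, 38.9, )
            System.out.println(t.ttype == StreamTokenizer.TT_WORD ? t.sval : String.valueOf((char) t.ttype));
        }
    }
}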
diff --git a/core/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java b/core/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java
index 39540f902fe..0ee3333c480 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java
@@ -51,6 +51,8 @@ public interface ShapeParser {
return null;
} if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
return GeoJsonParser.parse(parser, shapeMapper);
+ } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
+ return GeoWKTParser.parse(parser);
}
throw new ElasticsearchParseException("shape must be an object consisting of type and coordinates");
}
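Reviewer note: with this branch the geo_shape value becomes format-polymorphic, and the dispatch is purely on the current XContent token. An illustration (the field values are examples, not taken from the diff):

public class GeoShapeDocs {
    // Both values carry the same point; the token type selects the parser:
    static final String GEOJSON = "{\"type\": \"point\", \"coordinates\": [-77.0, 38.9]}"; // START_OBJECT -> GeoJsonParser
    static final String WKT     = "\"POINT (-77.0 38.9)\"";                                // VALUE_STRING -> GeoWKTParser
}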
diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
index 38eaef1d14d..f952eb36a0d 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
@@ -500,6 +500,16 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
return updateSettings(toApply, target, updates, type, false);
}
+ /**
+ * Returns true if the given key is a valid delete key
+ */
+ private boolean isValidDelete(String key, boolean onlyDynamic) {
+ return isFinalSetting(key) == false && // it's not a final setting
+ (onlyDynamic && isDynamicSetting(key) // it's a dynamicSetting and we only do dynamic settings
+ || get(key) == null && key.startsWith(ARCHIVED_SETTINGS_PREFIX) // the setting is not registered AND it's been archived
+ || (onlyDynamic == false && get(key) != null)); // if it's not dynamic AND we have a key
+ }
+
/**
* Updates a target settings builder with new, updated or deleted settings from a given settings builder.
*
@@ -519,21 +529,16 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
final Predicate<String> canUpdate = (key) -> (
isFinalSetting(key) == false && // it's not a final setting
((onlyDynamic == false && get(key) != null) || isDynamicSetting(key)));
- final Predicate<String> canRemove = (key) ->(// we can delete if
- isFinalSetting(key) == false && // it's not a final setting
- (onlyDynamic && isDynamicSetting(key) // it's a dynamicSetting and we only do dynamic settings
- || get(key) == null && key.startsWith(ARCHIVED_SETTINGS_PREFIX) // the setting is not registered AND it's been archived
- || (onlyDynamic == false && get(key) != null))); // if it's not dynamic AND we have a key
for (String key : toApply.keySet()) {
- boolean isNull = toApply.get(key) == null;
- if (isNull && (canRemove.test(key) || key.endsWith("*"))) {
+ boolean isDelete = toApply.hasValue(key) == false;
+ if (isDelete && (isValidDelete(key, onlyDynamic) || key.endsWith("*"))) {
// this either accepts null values that suffice the canUpdate test OR wildcard expressions (key ends with *)
// we don't validate if there is any dynamic setting with that prefix yet we could do in the future
toRemove.add(key);
// we don't set changed here it's set after we apply deletes below if something actually changed
} else if (get(key) == null) {
throw new IllegalArgumentException(type + " setting [" + key + "], not recognized");
- } else if (isNull == false && canUpdate.test(key)) {
+ } else if (isDelete == false && canUpdate.test(key)) {
validate(key, toApply, false); // we might not have a full picture here due to a dependency validation
settingsBuilder.copy(key, toApply);
updates.copy(key, toApply);
@@ -546,7 +551,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
}
}
}
- changed |= applyDeletes(toRemove, target, canRemove);
+ changed |= applyDeletes(toRemove, target, k -> isValidDelete(k, onlyDynamic));
target.put(settingsBuilder.build());
return changed;
}
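Reviewer note: extracting the predicate into isValidDelete lets the same rule drive both the per-key loop and applyDeletes. A truth-functional restatement of the rule (a sketch, not the real class; the boolean parameters stand in for the corresponding lookups):

public class DeleteRule {
    static boolean deletable(boolean isFinal, boolean onlyDynamic,
                             boolean isDynamic, boolean registered, boolean archived) {
        return isFinal == false                           // final settings are never deletable
            && ((onlyDynamic && isDynamic)                // dynamic-only update, dynamic setting
                || (registered == false && archived)      // archived leftover, any update
                || (onlyDynamic == false && registered)); // full update, any registered setting
    }
}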
diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index 2bea2a59e16..ae28b42cf16 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -85,6 +85,7 @@ import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchService;
+import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.RemoteClusterAware;
@@ -360,6 +361,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
SearchService.MAX_KEEPALIVE_SETTING,
+ MultiBucketConsumerService.MAX_BUCKET_SETTING,
SearchService.LOW_LEVEL_CANCELLATION_SETTING,
Node.WRITE_PORTS_FILE_SETTING,
Node.NODE_NAME_SETTING,
diff --git a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java
index 441bb131f03..6ebc47c8252 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java
@@ -49,6 +49,7 @@ import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
+import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.lucene.codecs.CodecUtil;
@@ -59,7 +60,6 @@ import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.SetOnce;
-import org.elasticsearch.bootstrap.BootstrapSettings;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.Randomness;
@@ -75,6 +75,11 @@ import org.elasticsearch.common.Randomness;
*/
public class KeyStoreWrapper implements SecureSettings {
+ /**
+ * A regex for the valid characters that a setting name in the keystore may use.
+ */
+ private static final Pattern ALLOWED_SETTING_NAME = Pattern.compile("[a-z0-9_\\-.]+");
+
public static final Setting<SecureString> SEED_SETTING = SecureSetting.secureString("keystore.seed", null);
/** Characters that may be used in the bootstrap seed setting added to all keystores. */
@@ -383,6 +388,18 @@ public class KeyStoreWrapper implements SecureSettings {
return Base64.getDecoder().wrap(bytesStream);
}
+ /**
+ * Ensure the given setting name is allowed.
+ *
+ * @throws IllegalArgumentException if the setting name is not valid
+ */
+ public static void validateSettingName(String setting) {
+ if (ALLOWED_SETTING_NAME.matcher(setting).matches() == false) {
+ throw new IllegalArgumentException("Setting name [" + setting + "] does not match the allowed setting name pattern ["
+ + ALLOWED_SETTING_NAME.pattern() + "]");
+ }
+ }
+
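Reviewer note: the pattern admits lowercase alphanumerics plus underscore, hyphen, and dot. A quick sketch of what the new validation accepts and rejects (the setting names are illustrative):

import java.util.regex.Pattern;

public class SettingNameCheck {
    private static final Pattern ALLOWED = Pattern.compile("[a-z0-9_\\-.]+"); // as in the diff
    public static void main(String[] args) {
        System.out.println(ALLOWED.matcher("cloud.aws.access_key").matches()); // true
        System.out.println(ALLOWED.matcher("Cloud.AWS.AccessKey").matches());  // false: no upper case
        System.out.println(ALLOWED.matcher("key with spaces").matches());      // false: no whitespace
    }
}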
/**
* Set a string setting.
*
@@ -390,6 +407,7 @@ public class KeyStoreWrapper implements SecureSettings {
*/
void setString(String setting, char[] value) throws GeneralSecurityException {
assert isLoaded();
+ validateSettingName(setting);
if (ASCII_ENCODER.canEncode(CharBuffer.wrap(value)) == false) {
throw new IllegalArgumentException("Value must be ascii");
}
@@ -401,6 +419,7 @@ public class KeyStoreWrapper implements SecureSettings {
/** Set a file setting. */
void setFile(String setting, byte[] bytes) throws GeneralSecurityException {
assert isLoaded();
+ validateSettingName(setting);
bytes = Base64.getEncoder().encode(bytes);
char[] chars = new char[bytes.length];
for (int i = 0; i < chars.length; ++i) {
diff --git a/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java b/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java
index 4a1e598bba8..c23a0bd42e3 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java
@@ -46,6 +46,7 @@ public abstract class SecureSetting<T> extends Setting<T> {
private SecureSetting(String key, Property... properties) {
super(key, (String)null, null, ArrayUtils.concat(properties, FIXED_PROPERTIES, Property.class));
assert assertAllowedProperties(properties);
+ KeyStoreWrapper.validateSettingName(key);
}
private boolean assertAllowedProperties(Setting.Property... properties) {
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java
index 41acefdd8e8..0a0a01c3fe3 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java
@@ -306,6 +306,13 @@ public final class Settings implements ToXContentFragment {
}
}
+ /**
+ * Returns true iff the given key has a value in this settings object
+ */
+ public boolean hasValue(String key) {
+ return settings.get(key) != null;
+ }
+
/**
* We have to lazy initialize the deprecation logger as otherwise a static logger here would be constructed before logging is configured
* leading to a runtime failure (see {@link LogConfigurator#checkErrorListener()} ). The premature construction would come from any
@@ -617,7 +624,7 @@ public final class Settings implements ToXContentFragment {
}
/**
- * Parsers the generated xconten from {@link Settings#toXContent(XContentBuilder, Params)} into a new Settings object.
+ * Parses the generated xcontent from {@link Settings#toXContent(XContentBuilder, Params)} into a new Settings object.
* Note this method requires the parser to either be positioned on a null token or on
* {@link org.elasticsearch.common.xcontent.XContentParser.Token#START_OBJECT}.
*/
@@ -1229,8 +1236,9 @@ public final class Settings implements ToXContentFragment {
Iterator<Map.Entry<String, Object>> iterator = map.entrySet().iterator();
while(iterator.hasNext()) {
Map.Entry<String, Object> entry = iterator.next();
- if (entry.getKey().startsWith(prefix) == false) {
- replacements.put(prefix + entry.getKey(), entry.getValue());
+ String key = entry.getKey();
+ if (key.startsWith(prefix) == false && key.endsWith("*") == false) {
+ replacements.put(prefix + key, entry.getValue());
iterator.remove();
}
}
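Reviewer note: if I read the hunk right this is Settings.Builder#normalizePrefix, and the fix stops wildcard keys from being prefixed. A self-contained before/after sketch of the patched condition (keys are illustrative; HashMap iteration order varies):

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class PrefixDemo {
    public static void main(String[] args) {
        Map<String, String> map = new HashMap<>();
        map.put("refresh_interval", "1s");
        map.put("index.codec", "default");
        map.put("search.*", null); // wildcard delete marker
        Map<String, String> replacements = new HashMap<>();
        String prefix = "index.";
        Iterator<Map.Entry<String, String>> it = map.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, String> e = it.next();
            // mirror of the patched condition: skip already-prefixed keys AND wildcards
            if (e.getKey().startsWith(prefix) == false && e.getKey().endsWith("*") == false) {
                replacements.put(prefix + e.getKey(), e.getValue());
                it.remove();
            }
        }
        map.putAll(replacements);
        System.out.println(map); // index.refresh_interval=1s, index.codec=default, search.*=null
    }
}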
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java
index 45d9a208284..057a970470b 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java
@@ -246,13 +246,16 @@ public class EsExecutors {
* waiting if necessary for space to become available.
*/
static class ForceQueuePolicy implements XRejectedExecutionHandler {
+
@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
try {
+ // force queue policy should only be used with a scaling queue
+ assert executor.getQueue() instanceof ExecutorScalingQueue;
executor.getQueue().put(r);
- } catch (InterruptedException e) {
- //should never happen since we never wait
- throw new EsRejectedExecutionException(e);
+ } catch (final InterruptedException e) {
+ // a scaling queue never blocks so a put to it can never be interrupted
+ throw new AssertionError(e);
}
}
@@ -260,6 +263,7 @@ public class EsExecutors {
public long rejected() {
return 0;
}
+
}
}
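Reviewer note: scaling executors pair an unbounded queue (ExecutorScalingQueue, a LinkedTransferQueue subclass whose offer() can report false to force thread creation) with this handler, which re-queues via put(). Because the queue is unbounded, put() returns immediately, so an InterruptedException would indicate a programming error, hence the switch to AssertionError. A simplified sketch of the pattern under those assumptions:

import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;

// Simplified force-queue handler (ES pairs this with ExecutorScalingQueue):
class ForceQueue implements RejectedExecutionHandler {
    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
        try {
            executor.getQueue().put(r); // unbounded queue: succeeds immediately
        } catch (InterruptedException e) {
            throw new AssertionError(e); // cannot happen with an unbounded queue
        }
    }
}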
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java
index 01fbbac725b..a38bbf452b7 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java
@@ -27,29 +27,20 @@ import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
public class EsRejectedExecutionException extends ElasticsearchException {
+
private final boolean isExecutorShutdown;
- public EsRejectedExecutionException(String message, boolean isExecutorShutdown, Object... args) {
- super(message, args);
+ public EsRejectedExecutionException(String message, boolean isExecutorShutdown) {
+ super(message);
this.isExecutorShutdown = isExecutorShutdown;
}
- public EsRejectedExecutionException(String message, Object... args) {
- this(message, false, args);
- }
-
- public EsRejectedExecutionException(String message, boolean isExecutorShutdown) {
- this(message, isExecutorShutdown, new Object[0]);
+ public EsRejectedExecutionException(String message) {
+ this(message, false);
}
public EsRejectedExecutionException() {
- super((String)null);
- this.isExecutorShutdown = false;
- }
-
- public EsRejectedExecutionException(Throwable e) {
- super(null, e);
- this.isExecutorShutdown = false;
+ this(null, false);
}
@Override
@@ -79,4 +70,5 @@ public class EsRejectedExecutionException extends ElasticsearchException {
public boolean isExecutorShutdown() {
return isExecutorShutdown;
}
+
}
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java
index a1ac182b8dc..8bbf0a59ee0 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java
@@ -37,7 +37,11 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
/**
* Name used in error reporting.
*/
- protected final String name;
+ private final String name;
+
+ final String getName() {
+ return name;
+ }
EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) {
@@ -138,15 +142,16 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
}
@Override
- public String toString() {
+ public final String toString() {
StringBuilder b = new StringBuilder();
b.append(getClass().getSimpleName()).append('[');
- b.append(name).append(", ");
+ b.append("name = ").append(name).append(", ");
if (getQueue() instanceof SizeBlockingQueue) {
@SuppressWarnings("rawtypes")
SizeBlockingQueue queue = (SizeBlockingQueue) getQueue();
b.append("queue capacity = ").append(queue.capacity()).append(", ");
}
+ appendThreadPoolExecutorDetails(b);
/*
* ThreadPoolExecutor has some nice information in its toString but we
* can't get at it easily without just getting the toString.
@@ -155,6 +160,16 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
return b.toString();
}
+ /**
+ * Append details about this thread pool to the specified {@link StringBuilder}. All details should be appended as key/value pairs in
+ * the form "%s = %s, "
+ *
+ * @param sb the {@link StringBuilder} to append to
+ */
+ protected void appendThreadPoolExecutorDetails(final StringBuilder sb) {
+
+ }
+
protected Runnable wrapRunnable(Runnable command) {
return contextHolder.preserveContext(command);
}
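Reviewer note: toString() is now final, and subclass-specific state flows through the new appendThreadPoolExecutorDetails hook; QueueResizingEsThreadPoolExecutor below is the in-tree user. A self-contained analogue of the template method (class names are illustrative, not the actual ES types):

abstract class BasePool {
    private final String name;
    BasePool(String name) { this.name = name; }
    @Override
    public final String toString() {
        StringBuilder b = new StringBuilder();
        b.append(getClass().getSimpleName()).append('[');
        b.append("name = ").append(name).append(", ");
        appendDetails(b); // subclasses contribute "key = value, " pairs here
        return b.append(']').toString();
    }
    protected void appendDetails(StringBuilder sb) {}
}

class ResizingPool extends BasePool {
    ResizingPool(String name) { super(name); }
    @Override
    protected void appendDetails(StringBuilder sb) {
        sb.append("min queue capacity = ").append(10).append(", ");
    }
}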
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java
index 8062d5510c7..e929192b5dd 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java
@@ -22,21 +22,16 @@ package org.elasticsearch.common.util.concurrent;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.common.ExponentiallyWeightedMovingAverage;
-import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.ResizableBlockingQueue;
import java.util.Locale;
-import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.function.Supplier;
-import java.util.stream.Stream;
/**
* An extension to thread pool executor, which automatically adjusts the queue size of the
@@ -80,8 +75,8 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
this.maxQueueSize = maxQueueSize;
this.targetedResponseTimeNanos = targetedResponseTime.getNanos();
this.executionEWMA = new ExponentiallyWeightedMovingAverage(EWMA_ALPHA, 0);
- logger.debug("thread pool [{}] will adjust queue by [{}] when determining automatic queue size",
- name, QUEUE_ADJUSTMENT_AMOUNT);
+ logger.debug(
+ "thread pool [{}] will adjust queue by [{}] when determining automatic queue size", getName(), QUEUE_ADJUSTMENT_AMOUNT);
}
@Override
@@ -180,7 +175,7 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
final long avgTaskTime = totalNanos / tasksPerFrame;
logger.debug("[{}]: there were [{}] tasks in [{}], avg task time [{}], EWMA task execution [{}], " +
"[{} tasks/s], optimal queue is [{}], current capacity [{}]",
- name,
+ getName(),
tasksPerFrame,
TimeValue.timeValueNanos(totalRuntime),
TimeValue.timeValueNanos(avgTaskTime),
@@ -196,7 +191,7 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
final int newCapacity =
workQueue.adjustCapacity(desiredQueueSize, QUEUE_ADJUSTMENT_AMOUNT, minQueueSize, maxQueueSize);
if (oldCapacity != newCapacity && logger.isDebugEnabled()) {
- logger.debug("adjusted [{}] queue size by [{}], old capacity: [{}], new capacity: [{}]", name,
+ logger.debug("adjusted [{}] queue size by [{}], old capacity: [{}], new capacity: [{}]", getName(),
newCapacity > oldCapacity ? QUEUE_ADJUSTMENT_AMOUNT : -QUEUE_ADJUSTMENT_AMOUNT,
oldCapacity, newCapacity);
}
@@ -205,7 +200,7 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
logger.warn((Supplier<?>) () -> new ParameterizedMessage(
"failed to calculate optimal queue size for [{}] thread pool, " +
"total frame time [{}ns], tasks [{}], task execution time [{}ns]",
- name, totalRuntime, tasksPerFrame, totalNanos),
+ getName(), totalRuntime, tasksPerFrame, totalNanos),
e);
} finally {
// Finally, decrement the task count and time back to their starting values. We
@@ -224,7 +219,8 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
// - Adjustment happens and we decrement the tasks by 10, taskCount is now 15
// - Since taskCount will now be incremented forever, it will never be 10 again,
// so there will be no further adjustments
- logger.debug("[{}]: too many incoming tasks while queue size adjustment occurs, resetting measurements to 0", name);
+ logger.debug(
+ "[{}]: too many incoming tasks while queue size adjustment occurs, resetting measurements to 0", getName());
totalTaskNanos.getAndSet(1);
taskCount.getAndSet(0);
startNs = System.nanoTime();
@@ -237,26 +233,13 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
}
@Override
- public String toString() {
- StringBuilder b = new StringBuilder();
- b.append(getClass().getSimpleName()).append('[');
- b.append(name).append(", ");
-
- @SuppressWarnings("rawtypes")
- ResizableBlockingQueue queue = (ResizableBlockingQueue) getQueue();
-
- b.append("queue capacity = ").append(getCurrentCapacity()).append(", ");
- b.append("min queue capacity = ").append(minQueueSize).append(", ");
- b.append("max queue capacity = ").append(maxQueueSize).append(", ");
- b.append("frame size = ").append(tasksPerFrame).append(", ");
- b.append("targeted response rate = ").append(TimeValue.timeValueNanos(targetedResponseTimeNanos)).append(", ");
- b.append("task execution EWMA = ").append(TimeValue.timeValueNanos((long)executionEWMA.getAverage())).append(", ");
- b.append("adjustment amount = ").append(QUEUE_ADJUSTMENT_AMOUNT).append(", ");
- /*
- * ThreadPoolExecutor has some nice information in its toString but we
- * can't get at it easily without just getting the toString.
- */
- b.append(super.toString()).append(']');
- return b.toString();
+ protected void appendThreadPoolExecutorDetails(StringBuilder sb) {
+ sb.append("min queue capacity = ").append(minQueueSize).append(", ");
+ sb.append("max queue capacity = ").append(maxQueueSize).append(", ");
+ sb.append("frame size = ").append(tasksPerFrame).append(", ");
+ sb.append("targeted response rate = ").append(TimeValue.timeValueNanos(targetedResponseTimeNanos)).append(", ");
+ sb.append("task execution EWMA = ").append(TimeValue.timeValueNanos((long) executionEWMA.getAverage())).append(", ");
+ sb.append("adjustment amount = ").append(QUEUE_ADJUSTMENT_AMOUNT).append(", ");
}
+
}
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
index 95c08e88898..6427368c4b9 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
@@ -74,7 +74,6 @@ public final class ThreadContext implements Closeable, Writeable {
private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct();
private final Map<String, String> defaultHeader;
private final ContextThreadLocal threadLocal;
- private boolean isSystemContext;
/**
* Creates a new ThreadContext instance
@@ -121,7 +120,6 @@ public final class ThreadContext implements Closeable, Writeable {
return () -> threadLocal.set(context);
}
-
/**
* Just like {@link #stashContext()} but no default context is set.
* @param preserveResponseHeaders if set to true the response headers of the restore thread will be preserved.
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java
index 06269706e0d..54cdb7caeaa 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java
@@ -167,12 +167,13 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]");
unicastZenPingExecutorService = EsExecutors.newScaling(
- "unicast_connect",
- 0, concurrentConnects,
- 60,
- TimeUnit.SECONDS,
- threadFactory,
- threadPool.getThreadContext());
+ nodeName() + "/" + "unicast_connect",
+ 0,
+ concurrentConnects,
+ 60,
+ TimeUnit.SECONDS,
+ threadFactory,
+ threadPool.getThreadContext());
}
/**
diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
index f923abc1a6c..8c134b140bd 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
@@ -69,7 +69,9 @@ public final class EngineConfig {
private final QueryCache queryCache;
private final QueryCachingPolicy queryCachingPolicy;
@Nullable
- private final List<ReferenceManager.RefreshListener> refreshListeners;
+ private final List<ReferenceManager.RefreshListener> externalRefreshListener;
+ @Nullable
+ private final List<ReferenceManager.RefreshListener> internalRefreshListener;
@Nullable
private final Sort indexSort;
private final boolean forceNewHistoryUUID;
@@ -120,7 +122,8 @@ public final class EngineConfig {
Similarity similarity, CodecService codecService, Engine.EventListener eventListener,
QueryCache queryCache, QueryCachingPolicy queryCachingPolicy,
boolean forceNewHistoryUUID, TranslogConfig translogConfig, TimeValue flushMergesAfter,
- List<ReferenceManager.RefreshListener> refreshListeners, Sort indexSort,
+ List<ReferenceManager.RefreshListener> externalRefreshListener,
+ List<ReferenceManager.RefreshListener> internalRefreshListener, Sort indexSort,
TranslogRecoveryRunner translogRecoveryRunner, CircuitBreakerService circuitBreakerService) {
if (openMode == null) {
throw new IllegalArgumentException("openMode must not be null");
@@ -147,7 +150,8 @@ public final class EngineConfig {
this.flushMergesAfter = flushMergesAfter;
this.openMode = openMode;
this.forceNewHistoryUUID = forceNewHistoryUUID;
- this.refreshListeners = refreshListeners;
+ this.externalRefreshListener = externalRefreshListener;
+ this.internalRefreshListener = internalRefreshListener;
this.indexSort = indexSort;
this.translogRecoveryRunner = translogRecoveryRunner;
this.circuitBreakerService = circuitBreakerService;
@@ -343,12 +347,18 @@ public final class EngineConfig {
}
/**
- * The refresh listeners to add to Lucene
+ * The refresh listeners to add to Lucene for externally visible refreshes
*/
- public List<ReferenceManager.RefreshListener> getRefreshListeners() {
- return refreshListeners;
+ public List<ReferenceManager.RefreshListener> getExternalRefreshListener() {
+ return externalRefreshListener;
}
+ /**
+ * The refresh listeners to add to Lucene for internally visible refreshes. These listeners will also be invoked on external refreshes
+ */
+ public List<ReferenceManager.RefreshListener> getInternalRefreshListener() { return internalRefreshListener; }
+
+
/**
* returns true if the engine is allowed to optimize indexing operations with an auto-generated ID
*/
diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index e431bfb7a5b..53747b063df 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -232,9 +232,12 @@ public class InternalEngine extends Engine {
assert pendingTranslogRecovery.get() == false : "translog recovery can't be pending before we set it";
// don't allow commits until we are done with recovering
pendingTranslogRecovery.set(openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG);
- for (ReferenceManager.RefreshListener listener: engineConfig.getRefreshListeners()) {
+ for (ReferenceManager.RefreshListener listener: engineConfig.getExternalRefreshListener()) {
this.externalSearcherManager.addListener(listener);
}
+ for (ReferenceManager.RefreshListener listener: engineConfig.getInternalRefreshListener()) {
+ this.internalSearcherManager.addListener(listener);
+ }
success = true;
} finally {
if (success == false) {
@@ -426,11 +429,6 @@ public class InternalEngine extends Engine {
} else if (translog.isCurrent(translogGeneration) == false) {
commitIndexWriter(indexWriter, translog, lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID));
refreshLastCommittedSegmentInfos();
- } else if (lastCommittedSegmentInfos.getUserData().containsKey(HISTORY_UUID_KEY) == false) {
- assert historyUUID != null;
- // put the history uuid into the index
- commitIndexWriter(indexWriter, translog, lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID));
- refreshLastCommittedSegmentInfos();
}
// clean up what's not needed
translog.trimUnreferencedReaders();
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java
index db40fb228bc..68d6ac66678 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java
@@ -369,7 +369,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
public void setStrategyName(String strategyName) {
checkIfFrozen();
this.strategyName = strategyName;
- if (this.strategyName.equals(SpatialStrategy.TERM)) {
+ if (this.strategyName.equals(SpatialStrategy.TERM.getStrategyName())) {
this.pointsOnly = true;
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
index b21f47d8feb..e34a762f527 100755
--- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -105,6 +105,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT,
Property.Dynamic, Property.IndexScope, Property.Deprecated);
+ //TODO this needs to be cleaned up: _timestamp and _ttl are not supported anymore, _field_names, _seq_no, _version and _source are
+ //also missing, not sure if on purpose. See IndicesModule#getMetadataMappers
private static ObjectHashSet<String> META_FIELDS = ObjectHashSet.from(
"_uid", "_id", "_type", "_parent", "_routing", "_index",
"_size", "_timestamp", "_ttl"
diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java
index 2c60ebfac6b..95e3505e746 100644
--- a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java
+++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java
@@ -39,10 +39,13 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardClosedException;
import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
+import java.io.IOException;
+
/**
* Background global checkpoint sync action initiated when a shard goes inactive. This is needed because while we send the global checkpoint
* on every replication operation, after the last operation completes the global checkpoint could advance but without a follow-up operation
@@ -116,16 +119,24 @@ public class GlobalCheckpointSyncAction extends TransportReplicationAction<
@Override
protected PrimaryResult shardOperationOnPrimary(
final Request request, final IndexShard indexShard) throws Exception {
- indexShard.getTranslog().sync();
+ maybeSyncTranslog(indexShard);
return new PrimaryResult<>(request, new ReplicationResponse());
}
@Override
protected ReplicaResult shardOperationOnReplica(final Request request, final IndexShard indexShard) throws Exception {
- indexShard.getTranslog().sync();
+ maybeSyncTranslog(indexShard);
return new ReplicaResult();
}
+ private void maybeSyncTranslog(final IndexShard indexShard) throws IOException {
+ final Translog translog = indexShard.getTranslog();
+ if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST &&
+ translog.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
+ indexShard.getTranslog().sync();
+ }
+ }
+
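Reviewer note: the background sync now fsyncs only when it can matter. The new gate in isolation (a sketch mirroring the diff; the method name is mine, and Translog would need its import in a real file):

// Both conditions must hold before paying for an fsync:
static boolean shouldSync(Translog.Durability durability,
                          long lastSyncedGlobalCheckpoint, long globalCheckpoint) {
    return durability == Translog.Durability.REQUEST      // ASYNC shards fsync on a timer instead
        && lastSyncedGlobalCheckpoint < globalCheckpoint;  // skip when nothing new to persist
}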
public static final class Request extends ReplicationRequest<Request> {
private Request() {
diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index 1dc28915d09..f0246060acf 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -48,7 +48,6 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
-import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
@@ -66,7 +65,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.AsyncIOProcessor;
-import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexModule;
@@ -416,12 +414,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
logger.debug("failed to refresh due to move to cluster wide started", e);
}
- if (newRouting.primary()) {
- final DiscoveryNode recoverySourceNode = recoveryState.getSourceNode();
- if (currentRouting.isRelocationTarget() == false || recoverySourceNode.getVersion().before(Version.V_6_0_0_alpha1)) {
- // there was no primary context hand-off in < 6.0.0, need to manually activate the shard
- getEngine().seqNoService().activatePrimaryMode(getEngine().seqNoService().getLocalCheckpoint());
- }
+ if (newRouting.primary() && currentRouting.isRelocationTarget() == false) {
+ // there was no primary context hand-off in < 6.0.0, need to manually activate the shard
+ getEngine().seqNoService().activatePrimaryMode(getEngine().seqNoService().getLocalCheckpoint());
}
changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
@@ -485,15 +480,18 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* subsequently fails before the primary/replica re-sync completes successfully and we are now being
* promoted, the local checkpoint tracker here could be left in a state where it would re-issue sequence
* numbers. To ensure that this is not the case, we restore the state of the local checkpoint tracker by
- * replaying the translog and marking any operations there are completed. Rolling the translog generation is
- * not strictly needed here (as we will never have collisions between sequence numbers in a translog
- * generation in a new primary as it takes the last known sequence number as a starting point), but it
- * simplifies reasoning about the relationship between primary terms and translog generations.
+ * replaying the translog and marking any operations there are completed.
*/
- getEngine().rollTranslogGeneration();
- getEngine().restoreLocalCheckpointFromTranslog();
- getEngine().fillSeqNoGaps(newPrimaryTerm);
- getEngine().seqNoService().updateLocalCheckpointForShard(currentRouting.allocationId().getId(),
+ final Engine engine = getEngine();
+ engine.restoreLocalCheckpointFromTranslog();
+ /* Rolling the translog generation is not strictly needed here (as we will never have collisions between
+ * sequence numbers in a translog generation in a new primary as it takes the last known sequence number
+ * as a starting point), but it simplifies reasoning about the relationship between primary terms and
+ * translog generations.
+ */
+ engine.rollTranslogGeneration();
+ engine.fillSeqNoGaps(newPrimaryTerm);
+ engine.seqNoService().updateLocalCheckpointForShard(currentRouting.allocationId().getId(),
getEngine().seqNoService().getLocalCheckpoint());
primaryReplicaSyncer.accept(this, new ActionListener() {
@Override
@@ -1337,6 +1335,17 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
active.set(true);
newEngine.recoverFromTranslog();
}
+ assertSequenceNumbersInCommit();
+ }
+
+ private boolean assertSequenceNumbersInCommit() throws IOException {
+ final Map<String, String> userData = SegmentInfos.readLatestCommit(store.directory()).getUserData();
+ assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contain a local checkpoint";
+ assert userData.containsKey(SequenceNumbers.MAX_SEQ_NO) : "commit point doesn't contain a maximum sequence number";
+ assert userData.containsKey(Engine.HISTORY_UUID_KEY) : "commit point doesn't contain a history uuid";
+ assert userData.get(Engine.HISTORY_UUID_KEY).equals(getHistoryUUID()) : "commit point history uuid ["
+ + userData.get(Engine.HISTORY_UUID_KEY) + "] is different from the engine [" + getHistoryUUID() + "]";
+ return true;
}
private boolean assertMaxUnsafeAutoIdInCommit() throws IOException {
@@ -2185,8 +2194,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener,
indexCache.query(), cachingPolicy, forceNewHistoryUUID, translogConfig,
IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()),
- Arrays.asList(refreshListeners, new RefreshMetricUpdater(refreshMetric)), indexSort,
- this::runTranslogRecovery, circuitBreakerService);
+ Collections.singletonList(refreshListeners),
+ Collections.singletonList(new RefreshMetricUpdater(refreshMetric)),
+ indexSort, this::runTranslogRecovery, circuitBreakerService);
}
/**
diff --git a/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java
index 495f1dc4bdb..d527fa83501 100644
--- a/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java
+++ b/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java
@@ -217,7 +217,13 @@ public class TermVectorsService {
if (perFieldAnalyzer != null && perFieldAnalyzer.containsKey(field)) {
analyzer = mapperService.getIndexAnalyzers().get(perFieldAnalyzer.get(field).toString());
} else {
- analyzer = mapperService.fullName(field).indexAnalyzer();
+ MappedFieldType fieldType = mapperService.fullName(field);
+ if (fieldType instanceof KeywordFieldMapper.KeywordFieldType) {
+ KeywordFieldMapper.KeywordFieldType keywordFieldType = (KeywordFieldMapper.KeywordFieldType) fieldType;
+ analyzer = keywordFieldType.normalizer() == null ? keywordFieldType.indexAnalyzer() : keywordFieldType.normalizer();
+ } else {
+ analyzer = fieldType.indexAnalyzer();
+ }
}
if (analyzer == null) {
analyzer = mapperService.getIndexAnalyzers().getDefaultIndexAnalyzer();
diff --git a/core/src/main/java/org/elasticsearch/index/translog/CountedBitSet.java b/core/src/main/java/org/elasticsearch/index/translog/CountedBitSet.java
index 9fac230c9a8..ca1ae279a99 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/CountedBitSet.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/CountedBitSet.java
@@ -21,6 +21,7 @@ package org.elasticsearch.index.translog;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.RamUsageEstimator;
/**
* A {@link CountedBitSet} wraps a {@link FixedBitSet} but automatically releases the internal bitset
@@ -28,11 +29,14 @@ import org.apache.lucene.util.FixedBitSet;
* from translog as these numbers are likely to form contiguous ranges (eg. filling all bits).
*/
final class CountedBitSet extends BitSet {
+ static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(CountedBitSet.class);
private short onBits; // Number of bits that are set.
private FixedBitSet bitset;
CountedBitSet(short numBits) {
- assert numBits > 0;
+ if (numBits <= 0) {
+ throw new IllegalArgumentException("Number of bits must be positive. Given [" + numBits + "]");
+ }
this.onBits = 0;
this.bitset = new FixedBitSet(numBits);
}
@@ -41,7 +45,6 @@ final class CountedBitSet extends BitSet {
public boolean get(int index) {
assert 0 <= index && index < this.length();
assert bitset == null || onBits < bitset.length() : "Bitset should be released when all bits are set";
-
return bitset == null ? true : bitset.get(index);
}
@@ -52,7 +55,7 @@ final class CountedBitSet extends BitSet {
// Ignore set when bitset is full.
if (bitset != null) {
- boolean wasOn = bitset.getAndSet(index);
+ final boolean wasOn = bitset.getAndSet(index);
if (wasOn == false) {
onBits++;
// Once all bits are set, we can simply just return YES for all indexes.
@@ -66,12 +69,12 @@ final class CountedBitSet extends BitSet {
@Override
public void clear(int startIndex, int endIndex) {
- throw new UnsupportedOperationException("Not implemented yet");
+ throw new UnsupportedOperationException();
}
@Override
public void clear(int index) {
- throw new UnsupportedOperationException("Not implemented yet");
+ throw new UnsupportedOperationException();
}
@Override
@@ -86,20 +89,19 @@ final class CountedBitSet extends BitSet {
@Override
public int prevSetBit(int index) {
- throw new UnsupportedOperationException("Not implemented yet");
+ throw new UnsupportedOperationException();
}
@Override
public int nextSetBit(int index) {
- throw new UnsupportedOperationException("Not implemented yet");
+ throw new UnsupportedOperationException();
}
@Override
public long ramBytesUsed() {
- throw new UnsupportedOperationException("Not implemented yet");
+ return BASE_RAM_BYTES_USED + (bitset == null ? 0 : bitset.ramBytesUsed());
}
- // Exposed for testing
boolean isInternalBitsetReleased() {
return bitset == null;
}
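For context, the idea behind `CountedBitSet` fits in a few lines: count bits as they are set and, once all of them are on, drop the backing bitset and answer every query with `true`. A condensed standalone sketch (using `java.util.BitSet` instead of Lucene's `FixedBitSet`, and omitting the unsupported-operation overrides):

```java
// Simplified model of CountedBitSet's memory trade-off; not the real class.
import java.util.BitSet;

final class ReleasingBitSet {
    private final int numBits;
    private int onBits;
    private BitSet bits; // null once all bits are set

    ReleasingBitSet(int numBits) {
        if (numBits <= 0) {
            throw new IllegalArgumentException("Number of bits must be positive. Given [" + numBits + "]");
        }
        this.numBits = numBits;
        this.bits = new BitSet(numBits);
    }

    void set(int index) {
        if (bits == null) {
            return; // already full: every bit is implicitly set
        }
        if (bits.get(index) == false) {
            bits.set(index);
            if (++onBits == numBits) {
                bits = null; // all bits set: release the backing storage
            }
        }
    }

    boolean get(int index) {
        return bits == null || bits.get(index);
    }
}
```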
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
index e446ec7e6d3..f1e52409943 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
@@ -24,7 +24,6 @@ import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition;
import org.elasticsearch.action.resync.TransportResyncReplicationAction;
-import org.elasticsearch.index.shard.PrimaryReplicaSyncer;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
@@ -33,12 +32,12 @@ import org.elasticsearch.index.mapper.BooleanFieldMapper;
import org.elasticsearch.index.mapper.CompletionFieldMapper;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
+import org.elasticsearch.index.mapper.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.IndexFieldMapper;
import org.elasticsearch.index.mapper.IpFieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
-import org.elasticsearch.index.mapper.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.NumberFieldMapper;
@@ -52,6 +51,7 @@ import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper;
import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
+import org.elasticsearch.index.shard.PrimaryReplicaSyncer;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.mapper.MapperRegistry;
@@ -64,6 +64,9 @@ import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.function.Predicate;
/**
* Configures classes and services that are shared by indices on each node.
@@ -73,7 +76,8 @@ public class IndicesModule extends AbstractModule {
private final MapperRegistry mapperRegistry;
public IndicesModule(List<MapperPlugin> mapperPlugins) {
- this.mapperRegistry = new MapperRegistry(getMappers(mapperPlugins), getMetadataMappers(mapperPlugins));
+ this.mapperRegistry = new MapperRegistry(getMappers(mapperPlugins), getMetadataMappers(mapperPlugins),
+ getFieldFilter(mapperPlugins));
registerBuiltinWritables();
}
@@ -118,23 +122,42 @@ public class IndicesModule extends AbstractModule {
return Collections.unmodifiableMap(mappers);
}
- private Map<String, MetadataFieldMapper.TypeParser> getMetadataMappers(List<MapperPlugin> mapperPlugins) {
+ private static final Map<String, MetadataFieldMapper.TypeParser> builtInMetadataMappers = initBuiltInMetadataMappers();
+
+ private static Map<String, MetadataFieldMapper.TypeParser> initBuiltInMetadataMappers() {
+ Map<String, MetadataFieldMapper.TypeParser> builtInMetadataMappers;
// Use a LinkedHashMap for metadataMappers because iteration order matters
+ builtInMetadataMappers = new LinkedHashMap<>();
+ // UID first so it will be the first stored field to load (so will benefit from "fields: []" early termination)
+ builtInMetadataMappers.put(UidFieldMapper.NAME, new UidFieldMapper.TypeParser());
+ builtInMetadataMappers.put(IdFieldMapper.NAME, new IdFieldMapper.TypeParser());
+ builtInMetadataMappers.put(RoutingFieldMapper.NAME, new RoutingFieldMapper.TypeParser());
+ builtInMetadataMappers.put(IndexFieldMapper.NAME, new IndexFieldMapper.TypeParser());
+ builtInMetadataMappers.put(SourceFieldMapper.NAME, new SourceFieldMapper.TypeParser());
+ builtInMetadataMappers.put(TypeFieldMapper.NAME, new TypeFieldMapper.TypeParser());
+ builtInMetadataMappers.put(VersionFieldMapper.NAME, new VersionFieldMapper.TypeParser());
+ builtInMetadataMappers.put(ParentFieldMapper.NAME, new ParentFieldMapper.TypeParser());
+ builtInMetadataMappers.put(SeqNoFieldMapper.NAME, new SeqNoFieldMapper.TypeParser());
+ // _field_names must be added last so that it has a chance to see all the other mappers
+ builtInMetadataMappers.put(FieldNamesFieldMapper.NAME, new FieldNamesFieldMapper.TypeParser());
+ return Collections.unmodifiableMap(builtInMetadataMappers);
+ }
+
+ private static Map<String, MetadataFieldMapper.TypeParser> getMetadataMappers(List<MapperPlugin> mapperPlugins) {
Map<String, MetadataFieldMapper.TypeParser> metadataMappers = new LinkedHashMap<>();
- // builtin metadata mappers
- // UID first so it will be the first stored field to load (so will benefit from "fields: []" early termination
-
- metadataMappers.put(UidFieldMapper.NAME, new UidFieldMapper.TypeParser());
- metadataMappers.put(IdFieldMapper.NAME, new IdFieldMapper.TypeParser());
- metadataMappers.put(RoutingFieldMapper.NAME, new RoutingFieldMapper.TypeParser());
- metadataMappers.put(IndexFieldMapper.NAME, new IndexFieldMapper.TypeParser());
- metadataMappers.put(SourceFieldMapper.NAME, new SourceFieldMapper.TypeParser());
- metadataMappers.put(TypeFieldMapper.NAME, new TypeFieldMapper.TypeParser());
- metadataMappers.put(VersionFieldMapper.NAME, new VersionFieldMapper.TypeParser());
- metadataMappers.put(ParentFieldMapper.NAME, new ParentFieldMapper.TypeParser());
- metadataMappers.put(SeqNoFieldMapper.NAME, new SeqNoFieldMapper.TypeParser());
- // _field_names is not registered here, see below
+ int i = 0;
+ Map.Entry<String, MetadataFieldMapper.TypeParser> fieldNamesEntry = null;
+ for (Map.Entry<String, MetadataFieldMapper.TypeParser> entry : builtInMetadataMappers.entrySet()) {
+ if (i < builtInMetadataMappers.size() - 1) {
+ metadataMappers.put(entry.getKey(), entry.getValue());
+ } else {
+ assert entry.getKey().equals(FieldNamesFieldMapper.NAME) : "_field_names must be the last registered mapper, order counts";
+ fieldNamesEntry = entry;
+ }
+ i++;
+ }
+ assert fieldNamesEntry != null;
for (MapperPlugin mapperPlugin : mapperPlugins) {
for (Map.Entry<String, MetadataFieldMapper.TypeParser> entry : mapperPlugin.getMetadataMappers().entrySet()) {
@@ -147,11 +170,49 @@ public class IndicesModule extends AbstractModule {
}
}
- // we register _field_names here so that it has a chance to see all other mappers, including from plugins
- metadataMappers.put(FieldNamesFieldMapper.NAME, new FieldNamesFieldMapper.TypeParser());
+ // we register _field_names here so that it has a chance to see all the other mappers, including from plugins
+ metadataMappers.put(fieldNamesEntry.getKey(), fieldNamesEntry.getValue());
return Collections.unmodifiableMap(metadataMappers);
}
+ /**
+ * Returns a set containing all of the built-in metadata fields
+ */
+ public static Set<String> getBuiltInMetaDataFields() {
+ return builtInMetadataMappers.keySet();
+ }
+
+ private static Function<String, Predicate<String>> getFieldFilter(List<MapperPlugin> mapperPlugins) {
+ Function<String, Predicate<String>> fieldFilter = MapperPlugin.NOOP_FIELD_FILTER;
+ for (MapperPlugin mapperPlugin : mapperPlugins) {
+ fieldFilter = and(fieldFilter, mapperPlugin.getFieldFilter());
+ }
+ return fieldFilter;
+ }
+
+ private static Function<String, Predicate<String>> and(Function<String, Predicate<String>> first,
+ Function<String, Predicate<String>> second) {
+ //the purpose of this method is to avoid chaining no-op field filters, so that we can easily find out when no plugin plugs in
+ //a field filter, and hence skip the mappings filtering part as a whole, as it requires parsing mappings into a map.
+ if (first == MapperPlugin.NOOP_FIELD_FILTER) {
+ return second;
+ }
+ if (second == MapperPlugin.NOOP_FIELD_FILTER) {
+ return first;
+ }
+ return index -> {
+ Predicate<String> firstPredicate = first.apply(index);
+ Predicate<String> secondPredicate = second.apply(index);
+ if (firstPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) {
+ return secondPredicate;
+ }
+ if (secondPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) {
+ return firstPredicate;
+ }
+ return firstPredicate.and(secondPredicate);
+ };
+ }
+
@Override
protected void configure() {
bind(IndicesStore.class).asEagerSingleton();
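The `and` combinator above deliberately avoids wrapping no-op filters, so callers can later detect by reference equality that no plugin installed a real filter and skip mappings filtering entirely. A runnable sketch of the same idea, with local constants standing in for `MapperPlugin.NOOP_FIELD_FILTER` and `MapperPlugin.NOOP_FIELD_PREDICATE`:

```java
// Standalone sketch of the no-op-aware filter combinator.
import java.util.function.Function;
import java.util.function.Predicate;

final class FieldFilters {
    static final Predicate<String> NOOP_FIELD_PREDICATE = field -> true;
    static final Function<String, Predicate<String>> NOOP_FIELD_FILTER = index -> NOOP_FIELD_PREDICATE;

    static Function<String, Predicate<String>> and(Function<String, Predicate<String>> first,
                                                   Function<String, Predicate<String>> second) {
        // Return the other side unchanged rather than chaining a no-op, so the
        // result stays reference-equal to NOOP_FIELD_FILTER when nothing filters.
        if (first == NOOP_FIELD_FILTER) {
            return second;
        }
        if (second == NOOP_FIELD_FILTER) {
            return first;
        }
        return index -> {
            Predicate<String> p1 = first.apply(index);
            Predicate<String> p2 = second.apply(index);
            if (p1 == NOOP_FIELD_PREDICATE) {
                return p2;
            }
            if (p2 == NOOP_FIELD_PREDICATE) {
                return p1;
            }
            return p1.and(p2);
        };
    }
}
```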
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
index e2c66260a39..e6f3007a799 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -127,7 +127,9 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
+import java.util.function.Function;
import java.util.function.LongSupplier;
+import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
@@ -1262,4 +1264,22 @@ public class IndicesService extends AbstractLifecycleComponent
}
}
}
+
+ /**
+ * Returns a function which, given an index name, returns a predicate that fields must match in order to be returned by the get
+ * mappings, get index, get field mappings and field capabilities APIs. Useful to filter the fields that such APIs return.
+ * The predicate receives the field name as its input argument. In case multiple plugins register a field filter through
+ * {@link org.elasticsearch.plugins.MapperPlugin#getFieldFilter()}, only fields that match all the registered filters will be
+ * returned by the get mappings, get index, get field mappings and field capabilities APIs.
+ */
+ public Function<String, Predicate<String>> getFieldFilter() {
+ return mapperRegistry.getFieldFilter();
+ }
+
+ /**
+ * Returns true if the provided field is a registered metadata field (including ones registered via plugins), false otherwise.
+ */
+ public boolean isMetaDataField(String field) {
+ return mapperRegistry.isMetaDataField(field);
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/indices/mapper/MapperRegistry.java b/core/src/main/java/org/elasticsearch/indices/mapper/MapperRegistry.java
index bcc4c09d3dd..41d563c2037 100644
--- a/core/src/main/java/org/elasticsearch/indices/mapper/MapperRegistry.java
+++ b/core/src/main/java/org/elasticsearch/indices/mapper/MapperRegistry.java
@@ -21,10 +21,13 @@ package org.elasticsearch.indices.mapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
+import org.elasticsearch.plugins.MapperPlugin;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
+import java.util.function.Function;
+import java.util.function.Predicate;
/**
* A registry for all field mappers.
@@ -33,11 +36,14 @@ public final class MapperRegistry {
private final Map<String, Mapper.TypeParser> mapperParsers;
private final Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers;
+ private final Function<String, Predicate<String>> fieldFilter;
+
public MapperRegistry(Map<String, Mapper.TypeParser> mapperParsers,
- Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers) {
+ Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers, Function<String, Predicate<String>> fieldFilter) {
this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers));
this.metadataMapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(metadataMapperParsers));
+ this.fieldFilter = fieldFilter;
}
/**
@@ -55,4 +61,22 @@ public final class MapperRegistry {
public Map<String, MetadataFieldMapper.TypeParser> getMetadataMapperParsers() {
return metadataMapperParsers;
}
+
+ /**
+ * Returns true if the provided field is a registered metadata field, false otherwise
+ */
+ public boolean isMetaDataField(String field) {
+ return getMetadataMapperParsers().containsKey(field);
+ }
+
+ /**
+ * Returns a function that, given an index name, returns a predicate that fields must match in order to be returned by the get
+ * mappings, get index, get field mappings and field capabilities APIs. Useful to filter the fields that such APIs return.
+ * The predicate receives the field name as its input argument. In case multiple plugins register a field filter through
+ * {@link MapperPlugin#getFieldFilter()}, only fields that match all the registered filters will be returned by the get mappings,
+ * get index, get field mappings and field capabilities APIs.
+ */
+ public Function<String, Predicate<String>> getFieldFilter() {
+ return fieldFilter;
+ }
}
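From a plugin author's perspective, the registry's field filter is fed by `MapperPlugin#getFieldFilter()`. A hypothetical plugin might look roughly like this (index and field names are purely illustrative, and the `Plugin`/`MapperPlugin` supertypes are elided for brevity):

```java
// Hedged sketch: hide fields with a "private_" prefix on indices named "secure-*".
import java.util.function.Function;
import java.util.function.Predicate;

public class FieldFilterPlugin /* extends Plugin implements MapperPlugin */ {
    public Function<String, Predicate<String>> getFieldFilter() {
        return index -> index.startsWith("secure-")
            ? field -> field.startsWith("private_") == false
            // Returning the no-op predicate (MapperPlugin.NOOP_FIELD_PREDICATE in the
            // real API) lets the filtering machinery skip work for unaffected indices.
            : field -> true;
    }
}
```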
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
index 71ad21c14d7..a847088869b 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
@@ -149,12 +149,13 @@ public class RecoverySourceHandler {
final Translog translog = shard.getTranslog();
final long startingSeqNo;
+ final long requiredSeqNoRangeStart;
final boolean isSequenceNumberBasedRecoveryPossible = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO &&
isTargetSameHistory() && isTranslogReadyForSequenceNumberBasedRecovery();
-
if (isSequenceNumberBasedRecoveryPossible) {
logger.trace("performing sequence numbers based recovery. starting at [{}]", request.startingSeqNo());
startingSeqNo = request.startingSeqNo();
+ requiredSeqNoRangeStart = startingSeqNo;
} else {
final Engine.IndexCommitRef phase1Snapshot;
try {
@@ -162,10 +163,12 @@ public class RecoverySourceHandler {
} catch (final Exception e) {
throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e);
}
- // we set this to unassigned to create a translog roughly according to the retention policy
- // on the target
- startingSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
-
+ // we set this to 0 to create a translog roughly according to the retention policy
+ // on the target. Note that it will still filter out legacy operations with no sequence numbers
+ startingSeqNo = 0;
+ // but we must have everything above the local checkpoint in the commit
+ requiredSeqNoRangeStart =
+ Long.parseLong(phase1Snapshot.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1;
try {
phase1(phase1Snapshot.getIndexCommit(), translog::totalOperations);
} catch (final Exception e) {
@@ -178,6 +181,9 @@ public class RecoverySourceHandler {
}
}
}
+ assert startingSeqNo >= 0 : "startingSeqNo must be non-negative. got: " + startingSeqNo;
+ assert requiredSeqNoRangeStart >= startingSeqNo : "requiredSeqNoRangeStart [" + requiredSeqNoRangeStart + "] is lower than ["
+ + startingSeqNo + "]";
runUnderPrimaryPermit(() -> shard.initiateTracking(request.targetAllocationId()));
@@ -187,10 +193,19 @@ public class RecoverySourceHandler {
throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e);
}
+ final long endingSeqNo = shard.seqNoStats().getMaxSeqNo();
+ /*
+ * We need to wait for all operations up to the current max to complete, otherwise we can not guarantee that all
+ * operations in the required range will be available for replaying from the translog of the source.
+ */
+ cancellableThreads.execute(() -> shard.waitForOpsToComplete(endingSeqNo));
+
+ logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo);
+
logger.trace("snapshot translog for recovery; current size is [{}]", translog.estimateTotalOperationsFromMinSeq(startingSeqNo));
final long targetLocalCheckpoint;
try(Translog.Snapshot snapshot = translog.newSnapshotFromMinSeqNo(startingSeqNo)) {
- targetLocalCheckpoint = phase2(startingSeqNo, snapshot);
+ targetLocalCheckpoint = phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, snapshot);
} catch (Exception e) {
throw new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e);
}
@@ -224,7 +239,8 @@ public class RecoverySourceHandler {
/**
* Determines if the source translog is ready for a sequence-number-based peer recovery. The main condition here is that the source
- * translog contains all operations between the local checkpoint on the target and the current maximum sequence number on the source.
+ * translog contains all operations above the local checkpoint on the target. We already know that the translog contains or will
+ * contain all ops above the source local checkpoint, so we can stop checking there.
*
* @return {@code true} if the source is ready for a sequence-number-based recovery
* @throws IOException if an I/O exception occurred reading the translog snapshot
@@ -232,18 +248,10 @@ public class RecoverySourceHandler {
boolean isTranslogReadyForSequenceNumberBasedRecovery() throws IOException {
final long startingSeqNo = request.startingSeqNo();
assert startingSeqNo >= 0;
- final long endingSeqNo = shard.seqNoStats().getMaxSeqNo();
- logger.trace("testing sequence numbers in range: [{}, {}]", startingSeqNo, endingSeqNo);
+ final long localCheckpoint = shard.getLocalCheckpoint();
+ logger.trace("testing sequence numbers in range: [{}, {}]", startingSeqNo, localCheckpoint);
// the start recovery request is initialized with the starting sequence number set to the target shard's local checkpoint plus one
- if (startingSeqNo - 1 <= endingSeqNo) {
- /*
- * We need to wait for all operations up to the current max to complete, otherwise we can not guarantee that all
- * operations in the required range will be available for replaying from the translog of the source.
- */
- cancellableThreads.execute(() -> shard.waitForOpsToComplete(endingSeqNo));
-
- logger.trace("all operations up to [{}] completed, checking translog content", endingSeqNo);
-
+ if (startingSeqNo - 1 <= localCheckpoint) {
final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1);
try (Translog.Snapshot snapshot = shard.getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) {
Translog.Operation operation;
@@ -253,7 +261,7 @@ public class RecoverySourceHandler {
}
}
}
- return tracker.getCheckpoint() >= endingSeqNo;
+ return tracker.getCheckpoint() >= localCheckpoint;
} else {
return false;
}
@@ -433,13 +441,15 @@ public class RecoverySourceHandler {
* point-in-time view of the translog). It then sends each translog operation to the target node so it can be replayed into the new
* shard.
*
- * @param startingSeqNo the sequence number to start recovery from, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} if all
- * ops should be sent
- * @param snapshot a snapshot of the translog
- *
+ * @param startingSeqNo the sequence number to start recovery from, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} if all
+ * ops should be sent
+ * @param requiredSeqNoRangeStart the lower sequence number of the required range (ending with endingSeqNo)
+ * @param endingSeqNo the highest sequence number that should be sent
+ * @param snapshot a snapshot of the translog
* @return the local checkpoint on the target
*/
- long phase2(final long startingSeqNo, final Translog.Snapshot snapshot) throws IOException {
+ long phase2(final long startingSeqNo, long requiredSeqNoRangeStart, long endingSeqNo, final Translog.Snapshot snapshot)
+ throws IOException {
if (shard.state() == IndexShardState.CLOSED) {
throw new IndexShardClosedException(request.shardId());
}
@@ -447,10 +457,11 @@ public class RecoverySourceHandler {
final StopWatch stopWatch = new StopWatch().start();
- logger.trace("recovery [phase2]: sending transaction log operations");
+ logger.trace("recovery [phase2]: sending transaction log operations (seq# from [" + startingSeqNo + "], " +
+ "required [" + requiredSeqNoRangeStart + ":" + endingSeqNo + "]");
// send all the snapshot's translog operations to the target
- final SendSnapshotResult result = sendSnapshot(startingSeqNo, snapshot);
+ final SendSnapshotResult result = sendSnapshot(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, snapshot);
stopWatch.stop();
logger.trace("recovery [phase2]: took [{}]", stopWatch.totalTime());
@@ -511,18 +522,26 @@ public class RecoverySourceHandler {
*
* Operations are bulked into a single request depending on an operation count limit or size-in-bytes limit.
*
- * @param startingSeqNo the sequence number for which only operations with a sequence number greater than this will be sent
- * @param snapshot the translog snapshot to replay operations from
- * @return the local checkpoint on the target and the total number of operations sent
+ * @param startingSeqNo the sequence number for which only operations with a sequence number greater than this will be sent
+ * @param requiredSeqNoRangeStart the lower sequence number of the required range
+ * @param endingSeqNo the upper bound of the sequence number range to be sent (inclusive)
+ * @param snapshot the translog snapshot to replay operations from
+ * @return the local checkpoint on the target and the total number of operations sent
* @throws IOException if an I/O exception occurred reading the translog snapshot
*/
- protected SendSnapshotResult sendSnapshot(final long startingSeqNo, final Translog.Snapshot snapshot) throws IOException {
+ protected SendSnapshotResult sendSnapshot(final long startingSeqNo, long requiredSeqNoRangeStart, long endingSeqNo,
+ final Translog.Snapshot snapshot) throws IOException {
+ assert requiredSeqNoRangeStart <= endingSeqNo + 1:
+ "requiredSeqNoRangeStart " + requiredSeqNoRangeStart + " is larger than endingSeqNo " + endingSeqNo;
+ assert startingSeqNo <= requiredSeqNoRangeStart :
+ "startingSeqNo " + startingSeqNo + " is larger than requiredSeqNoRangeStart " + requiredSeqNoRangeStart;
int ops = 0;
long size = 0;
int skippedOps = 0;
int totalSentOps = 0;
final AtomicLong targetLocalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO);
final List<Translog.Operation> operations = new ArrayList<>();
+ final LocalCheckpointTracker requiredOpsTracker = new LocalCheckpointTracker(endingSeqNo, requiredSeqNoRangeStart - 1);
final int expectedTotalOps = snapshot.totalOperations();
if (expectedTotalOps == 0) {
@@ -539,12 +558,9 @@ public class RecoverySourceHandler {
throw new IndexShardClosedException(request.shardId());
}
cancellableThreads.checkForCancel();
- /*
- * If we are doing a sequence-number-based recovery, we have to skip older ops for which no sequence number was assigned, and
- * any ops before the starting sequence number.
- */
+
final long seqNo = operation.seqNo();
- if (startingSeqNo >= 0 && (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || seqNo < startingSeqNo)) {
+ if (seqNo < startingSeqNo || seqNo > endingSeqNo) {
skippedOps++;
continue;
}
@@ -552,6 +568,7 @@ public class RecoverySourceHandler {
ops++;
size += operation.estimateSize();
totalSentOps++;
+ requiredOpsTracker.markSeqNoAsCompleted(seqNo);
// check if this request is past bytes threshold, and if so, send it off
if (size >= chunkSizeInBytes) {
@@ -569,8 +586,14 @@ public class RecoverySourceHandler {
}
assert expectedTotalOps == snapshot.overriddenOperations() + skippedOps + totalSentOps
- : String.format(Locale.ROOT, "expected total [%d], overridden [%d], skipped [%d], total sent [%d]",
- expectedTotalOps, snapshot.overriddenOperations(), skippedOps, totalSentOps);
+ : String.format(Locale.ROOT, "expected total [%d], overridden [%d], skipped [%d], total sent [%d]",
+ expectedTotalOps, snapshot.overriddenOperations(), skippedOps, totalSentOps);
+
+ if (requiredOpsTracker.getCheckpoint() < endingSeqNo) {
+ throw new IllegalStateException("translog replay failed to cover required sequence numbers" +
+ " (required range [" + requiredSeqNoRangeStart + ":" + endingSeqNo + "). first missing op is ["
+ + (requiredOpsTracker.getCheckpoint() + 1) + "]");
+ }
logger.trace("sent final batch of [{}][{}] (total: [{}]) translog operations", ops, new ByteSizeValue(size), expectedTotalOps);
diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java
index fee6d76ca3d..a4b7e5147d5 100644
--- a/core/src/main/java/org/elasticsearch/node/Node.java
+++ b/core/src/main/java/org/elasticsearch/node/Node.java
@@ -100,7 +100,6 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
-import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.recovery.PeerRecoverySourceService;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoverySettings;
@@ -449,6 +448,11 @@ public class Node implements Closeable {
transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(),
httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter(), responseCollectorService,
searchTransportService);
+
+ final SearchService searchService = newSearchService(clusterService, indicesService,
+ threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase(),
+ responseCollectorService);
+
modules.add(b -> {
b.bind(Node.class).toInstance(this);
b.bind(NodeService.class).toInstance(nodeService);
@@ -470,12 +474,10 @@ public class Node implements Closeable {
b.bind(MetaDataUpgrader.class).toInstance(metaDataUpgrader);
b.bind(MetaStateService.class).toInstance(metaStateService);
b.bind(IndicesService.class).toInstance(indicesService);
- b.bind(SearchService.class).toInstance(newSearchService(clusterService, indicesService,
- threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase(),
- responseCollectorService));
+ b.bind(SearchService.class).toInstance(searchService);
b.bind(SearchTransportService.class).toInstance(searchTransportService);
- b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings, bigArrays,
- scriptModule.getScriptService()));
+ b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings,
+ searchService::createReduceContext));
b.bind(Transport.class).toInstance(transport);
b.bind(TransportService.class).toInstance(transportService);
b.bind(NetworkService.class).toInstance(networkService);
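The Node.java change hoists the `SearchService` construction out of the binding block so the same instance can back both its own binding and the `SearchPhaseController`, which now receives a reduce-context factory as a method reference. A minimal sketch of that wiring pattern, with simplified stand-in types (the real constructor also takes settings):

```java
// Sketch of supplier-based wiring: build the service once, hand a factory method on.
import java.util.function.Supplier;

final class WiringSketch {
    static final class ReduceContext {}

    static final class SearchService {
        ReduceContext createReduceContext() {
            return new ReduceContext();
        }
    }

    static final class SearchPhaseController {
        private final Supplier<ReduceContext> reduceContextFactory;

        SearchPhaseController(Supplier<ReduceContext> reduceContextFactory) {
            this.reduceContextFactory = reduceContextFactory;
        }
    }

    public static void main(String[] args) {
        SearchService searchService = new SearchService(); // created once, up front
        // The controller pulls reduce contexts on demand instead of holding
        // BigArrays and the script service directly.
        SearchPhaseController controller = new SearchPhaseController(searchService::createReduceContext);
    }
}
```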
diff --git a/core/src/main/java/org/elasticsearch/plugins/MapperPlugin.java b/core/src/main/java/org/elasticsearch/plugins/MapperPlugin.java
index 5dfcdc6bda4..5edf994b32e 100644
--- a/core/src/main/java/org/elasticsearch/plugins/MapperPlugin.java
+++ b/core/src/main/java/org/elasticsearch/plugins/MapperPlugin.java
@@ -19,12 +19,14 @@
package org.elasticsearch.plugins;
-import java.util.Collections;
-import java.util.Map;
-
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
+import java.util.Collections;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.function.Predicate;
+
/**
* An extension point for {@link Plugin} implementations to add custom mappers
*/
@@ -32,7 +34,7 @@ public interface MapperPlugin {
/**
* Returns additional mapper implementations added by this plugin.
- *
+ *
* The key of the returned {@link Map} is the unique name for the mapper which will be used
* as the mapping {@code type}, and the value is a {@link Mapper.TypeParser} to parse the
* mapper settings into a {@link Mapper}.
@@ -43,7 +45,7 @@ public interface MapperPlugin {
/**
* Returns additional metadata mapper implementations added by this plugin.
- *
+ *