Merge branch 'master' into rankeval

Christoph Büscher 2017-12-07 14:22:46 +01:00
commit 52cb6c8ef2
225 changed files with 6816 additions and 1470 deletions

CONTRIBUTING.md

@@ -111,19 +111,18 @@ then `File->New Project From Existing Sources`. Point to the root of
the source directory, select
`Import project from external model->Gradle`, enable
`Use auto-import`. In order to run tests directly from
IDEA 2017.2 and above it is required to disable IDEA run launcher to avoid
finding yourself in "jar hell", which can be achieved by adding the
IDEA 2017.2 and above, it is required to disable the IDEA run launcher in order to avoid
`idea_rt.jar` causing "jar hell". This can be achieved by adding the
`-Didea.no.launcher=true` [JVM
option](https://intellij-support.jetbrains.com/hc/en-us/articles/206544869-Configuring-JVM-options-and-platform-properties)
or by adding `idea.no.launcher=true` to the
option](https://intellij-support.jetbrains.com/hc/en-us/articles/206544869-Configuring-JVM-options-and-platform-properties).
Alternatively, `idea.no.launcher=true` can be set in the
[`idea.properties`](https://www.jetbrains.com/help/idea/file-idea-properties.html)
file which can be accessed under Help > Edit Custom Properties within IDEA. You
may also need to [remove `ant-javafx.jar` from your
file which can be accessed under Help > Edit Custom Properties (this will require a
restart of IDEA). For IDEA 2017.3 and above, in addition to the JVM option, you will need to go to
`Run->Edit Configurations...` and change the value for the `Shorten command line` setting from
`user-local default: none` to `classpath file`. You may also need to [remove `ant-javafx.jar` from your
classpath](https://github.com/elastic/elasticsearch/issues/14348) if that is
reported as a source of jar hell. Additionally, in order to run tests directly
from IDEA 2017.3 and above, go to `Run->Edit Configurations...` and change the
value for the `Shorten command line` setting from `user-local default: none` to
`classpath file`.
reported as a source of jar hell.
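For reference, a minimal sketch of the two equivalent fixes described above (assuming the default custom-properties location; the menu path is quoted from the text):

    # Help > Edit Custom Properties (idea.properties)
    idea.no.launcher=true

or, passed as a JVM option instead:

    -Didea.no.launcher=true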
The Elasticsearch codebase makes heavy use of Java `assert`s and the
test runner requires that assertions be enabled within the JVM. This

IndicesClient.java

@@ -21,6 +21,8 @@ package org.elasticsearch.client;
import org.apache.http.Header;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
@@ -29,13 +31,13 @@ import java.util.Collections;
/**
* A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Indices API.
*
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices.html">Indices API on elastic.co</a>
*/
public final class IndicesClient {
private final RestHighLevelClient restHighLevelClient;
public IndicesClient(RestHighLevelClient restHighLevelClient) {
IndicesClient(RestHighLevelClient restHighLevelClient) {
this.restHighLevelClient = restHighLevelClient;
}
@@ -56,8 +58,32 @@ public final class IndicesClient {
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html">
* Delete Index API on elastic.co</a>
*/
public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener, Header... headers) {
public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener,
Header... headers) {
restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
listener, Collections.emptySet(), headers);
}
/**
* Creates an index using the Create Index API
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html">
* Create Index API on elastic.co</a>
*/
public CreateIndexResponse createIndex(CreateIndexRequest createIndexRequest, Header... headers) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
Collections.emptySet(), headers);
}
/**
* Asynchronously creates an index using the Create Index API
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html">
* Create Index API on elastic.co</a>
*/
public void createIndexAsync(CreateIndexRequest createIndexRequest, ActionListener<CreateIndexResponse> listener,
Header... headers) {
restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
listener, Collections.emptySet(), headers);
}
}
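Taken together, a minimal usage sketch of the new create-index entry points (a sketch only: it assumes a `RestHighLevelClient` named `client` is already built, runs inside a method that may throw `IOException`, and uses a hypothetical index name):

    CreateIndexRequest request = new CreateIndexRequest("my_index");
    // Blocking variant: returns once the cluster has responded.
    CreateIndexResponse response = client.indices().createIndex(request);
    boolean acknowledged = response.isAcknowledged();

    // Non-blocking variant: the listener is invoked when the response or failure arrives.
    client.indices().createIndexAsync(request, new ActionListener<CreateIndexResponse>() {
        @Override
        public void onResponse(CreateIndexResponse createIndexResponse) {
            // e.g. inspect createIndexResponse.isAcknowledged()
        }
        @Override
        public void onFailure(Exception e) {
            // handle the failure
        }
    });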

Request.java

@@ -29,12 +29,14 @@ import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.support.ActiveShardCount;
@@ -49,6 +51,7 @@ import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -135,6 +138,19 @@ public final class Request {
return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
}
static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException {
String endpoint = endpoint(createIndexRequest.indices(), Strings.EMPTY_ARRAY, "");
Params parameters = Params.builder();
parameters.withTimeout(createIndexRequest.timeout());
parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout());
parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards());
parameters.withUpdateAllTypes(createIndexRequest.updateAllTypes());
HttpEntity entity = createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE);
return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
}
static Request info() {
return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
}
@@ -381,6 +397,18 @@ public final class Request {
return new Request("DELETE", "/_search/scroll", Collections.emptyMap(), entity);
}
static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException {
Params params = Params.builder();
params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true");
if (multiSearchRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) {
params.putParam("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests()));
}
XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent();
byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent);
HttpEntity entity = new ByteArrayEntity(source, createContentType(xContent.type()));
return new Request("GET", "/_msearch", params.getParams(), entity);
}
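For orientation, `MultiSearchRequest.writeMultiLineFormat` produces the newline-delimited msearch body, one metadata line followed by one body line per search request. A rough sketch with illustrative values (the exact fields depend on what is set on each request):

    {"index":["index1"]}
    {"from":0,"size":20}
    {"index":["index2"]}
    {"query":{"match_all":{}}}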
private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));
@@ -520,6 +548,13 @@ public final class Request {
return putParam("timeout", timeout);
}
Params withUpdateAllTypes(boolean updateAllTypes) {
if (updateAllTypes) {
return putParam("update_all_types", Boolean.TRUE.toString());
}
return this;
}
Params withVersion(long version) {
if (version != Versions.MATCH_ANY) {
return putParam("version", Long.toString(version));

RestHighLevelClient.java

@@ -38,6 +38,8 @@ import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
@@ -377,6 +379,28 @@ public class RestHighLevelClient implements Closeable {
performRequestAsyncAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, listener, emptySet(), headers);
}
/**
* Executes a multi search using the msearch API
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html">Multi search API on
* elastic.co</a>
*/
public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, Header... headers) throws IOException {
return performRequestAndParseEntity(multiSearchRequest, Request::multiSearch, MultiSearchResponse::fromXContext,
emptySet(), headers);
}
/**
* Asynchronously executes a multi search using the msearch API
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html">Multi search API on
* elastic.co</a>
*/
public final void multiSearchAsync(MultiSearchRequest searchRequest, ActionListener<MultiSearchResponse> listener, Header... headers) {
performRequestAsyncAndParseEntity(searchRequest, Request::multiSearch, MultiSearchResponse::fromXContext, listener,
emptySet(), headers);
}
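A brief usage sketch of these two methods (assuming an already-built `RestHighLevelClient` named `client`; the index names are hypothetical):

    MultiSearchRequest request = new MultiSearchRequest();
    request.add(new SearchRequest("index1"));
    request.add(new SearchRequest("index2"));
    MultiSearchResponse response = client.multiSearch(request);
    for (MultiSearchResponse.Item item : response.getResponses()) {
        if (item.isFailure()) {
            Exception failure = item.getFailure(); // per-request failure
        } else {
            SearchResponse searchResponse = item.getResponse();
        }
    }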
/**
* Executes a search using the Search Scroll API
*

IndicesClientIT.java

@@ -20,14 +20,88 @@
package org.elasticsearch.client;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.Map;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
public class IndicesClientIT extends ESRestHighLevelClientTestCase {
@SuppressWarnings("unchecked")
public void testCreateIndex() throws IOException {
{
// Create index
String indexName = "plain_index";
assertFalse(indexExists(indexName));
CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
CreateIndexResponse createIndexResponse =
execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync);
assertTrue(createIndexResponse.isAcknowledged());
assertTrue(indexExists(indexName));
}
{
// Create index with mappings, aliases and settings
String indexName = "rich_index";
assertFalse(indexExists(indexName));
CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
Alias alias = new Alias("alias_name");
alias.filter("{\"term\":{\"year\":2016}}");
alias.routing("1");
createIndexRequest.alias(alias);
Settings.Builder settings = Settings.builder();
settings.put(SETTING_NUMBER_OF_REPLICAS, 2);
createIndexRequest.settings(settings);
XContentBuilder mappingBuilder = JsonXContent.contentBuilder();
mappingBuilder.startObject().startObject("properties").startObject("field");
mappingBuilder.field("type", "text");
mappingBuilder.endObject().endObject().endObject();
createIndexRequest.mapping("type_name", mappingBuilder);
CreateIndexResponse createIndexResponse =
execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync);
assertTrue(createIndexResponse.isAcknowledged());
Map<String, Object> indexMetaData = getIndexMetadata(indexName);
Map<String, Object> settingsData = (Map) indexMetaData.get("settings");
Map<String, Object> indexSettings = (Map) settingsData.get("index");
assertEquals("2", indexSettings.get("number_of_replicas"));
Map<String, Object> aliasesData = (Map) indexMetaData.get("aliases");
Map<String, Object> aliasData = (Map) aliasesData.get("alias_name");
assertEquals("1", aliasData.get("index_routing"));
Map<String, Object> filter = (Map) aliasData.get("filter");
Map<String, Object> term = (Map) filter.get("term");
assertEquals(2016, term.get("year"));
Map<String, Object> mappingsData = (Map) indexMetaData.get("mappings");
Map<String, Object> typeData = (Map) mappingsData.get("type_name");
Map<String, Object> properties = (Map) typeData.get("properties");
Map<String, Object> field = (Map) properties.get("field");
assertEquals("text", field.get("type"));
}
}
public void testDeleteIndex() throws IOException {
{
// Delete index if exists
@@ -65,4 +139,18 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
return response.getStatusLine().getStatusCode() == 200;
}
@SuppressWarnings("unchecked")
private Map<String, Object> getIndexMetadata(String index) throws IOException {
Response response = client().performRequest("GET", index);
XContentType entityContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue());
Map<String, Object> responseEntity = XContentHelper.convertToMap(entityContentType.xContent(), response.getEntity().getContent(),
false);
Map<String, Object> indexMetaData = (Map) responseEntity.get(index);
assertNotNull(indexMetaData);
return indexMetaData;
}
}

RequestTests.java

@@ -25,6 +25,7 @@ import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
@@ -32,9 +33,11 @@ import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
@@ -42,6 +45,7 @@ import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@@ -56,6 +60,7 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.builder.SearchSourceBuilder;
@@ -72,16 +77,21 @@ import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.StringJoiner;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.client.Request.REQUEST_BODY_CONTENT_TYPE;
import static org.elasticsearch.client.Request.enforceSameContentType;
import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
public class RequestTests extends ESTestCase {
@@ -245,6 +255,34 @@ public class RequestTests extends ESTestCase {
assertEquals(method, request.getMethod());
}
public void testCreateIndex() throws IOException {
CreateIndexRequest createIndexRequest = new CreateIndexRequest();
String indexName = "index-" + randomAlphaOfLengthBetween(2, 5);
createIndexRequest.index(indexName);
Map<String, String> expectedParams = new HashMap<>();
setRandomTimeout(createIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
setRandomMasterTimeout(createIndexRequest, expectedParams);
setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams);
if (randomBoolean()) {
boolean updateAllTypes = randomBoolean();
createIndexRequest.updateAllTypes(updateAllTypes);
if (updateAllTypes) {
expectedParams.put("update_all_types", Boolean.TRUE.toString());
}
}
Request request = Request.createIndex(createIndexRequest);
assertEquals("/" + indexName, request.getEndpoint());
assertEquals(expectedParams, request.getParameters());
assertEquals("PUT", request.getMethod());
assertToXContentBody(createIndexRequest, request.getEntity());
}
public void testDeleteIndex() throws IOException {
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest();
@@ -399,11 +437,7 @@ public class RequestTests extends ESTestCase {
expectedParams.put("refresh", refreshPolicy.getValue());
}
}
if (randomBoolean()) {
int waitForActiveShards = randomIntBetween(0, 10);
updateRequest.waitForActiveShards(waitForActiveShards);
expectedParams.put("wait_for_active_shards", String.valueOf(waitForActiveShards));
}
setRandomWaitForActiveShards(updateRequest::waitForActiveShards, expectedParams);
if (randomBoolean()) {
long version = randomLong();
updateRequest.version(version);
@@ -771,6 +805,55 @@ public class RequestTests extends ESTestCase {
}
}
public void testMultiSearch() throws IOException {
int numberOfSearchRequests = randomIntBetween(0, 32);
MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
for (int i = 0; i < numberOfSearchRequests; i++) {
SearchRequest searchRequest = randomSearchRequest(() -> {
// No need to return a very complex SearchSourceBuilder here, that is tested elsewhere
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.from(randomInt(10));
searchSourceBuilder.size(randomIntBetween(20, 100));
return searchSourceBuilder;
});
// scroll is not supported in the current msearch api, so unset it:
searchRequest.scroll((Scroll) null);
// only expand_wildcards, ignore_unavailable and allow_no_indices can be specified from msearch api, so unset other options:
IndicesOptions randomlyGenerated = searchRequest.indicesOptions();
IndicesOptions msearchDefault = new MultiSearchRequest().indicesOptions();
searchRequest.indicesOptions(IndicesOptions.fromOptions(
randomlyGenerated.ignoreUnavailable(), randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(),
randomlyGenerated.expandWildcardsClosed(), msearchDefault.allowAliasesToMultipleIndices(),
msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases()
));
multiSearchRequest.add(searchRequest);
}
Map<String, String> expectedParams = new HashMap<>();
expectedParams.put(RestSearchAction.TYPED_KEYS_PARAM, "true");
if (randomBoolean()) {
multiSearchRequest.maxConcurrentSearchRequests(randomIntBetween(1, 8));
expectedParams.put("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests()));
}
Request request = Request.multiSearch(multiSearchRequest);
assertEquals("/_msearch", request.getEndpoint());
assertEquals(expectedParams, request.getParameters());
List<SearchRequest> requests = new ArrayList<>();
CheckedBiConsumer<SearchRequest, XContentParser, IOException> consumer = (searchRequest, p) -> {
SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(p);
if (searchSourceBuilder.equals(new SearchSourceBuilder()) == false) {
searchRequest.source(searchSourceBuilder);
}
requests.add(searchRequest);
};
MultiSearchRequest.readMultiLineFormat(new BytesArray(EntityUtils.toByteArray(request.getEntity())),
REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null,
null, xContentRegistry(), true);
assertEquals(requests, multiSearchRequest.requests());
}
public void testSearchScroll() throws IOException {
SearchScrollRequest searchScrollRequest = new SearchScrollRequest();
searchScrollRequest.scrollId(randomAlphaOfLengthBetween(5, 10));
@@ -782,7 +865,7 @@ public class RequestTests extends ESTestCase {
assertEquals("/_search/scroll", request.getEndpoint());
assertEquals(0, request.getParameters().size());
assertToXContentBody(searchScrollRequest, request.getEntity());
assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
}
public void testClearScroll() throws IOException {
@@ -796,11 +879,11 @@ public class RequestTests extends ESTestCase {
assertEquals("/_search/scroll", request.getEndpoint());
assertEquals(0, request.getParameters().size());
assertToXContentBody(clearScrollRequest, request.getEntity());
assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
}
private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException {
BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, Request.REQUEST_BODY_CONTENT_TYPE, false);
BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false);
assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue());
assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity)));
}
@@ -959,6 +1042,14 @@ public class RequestTests extends ESTestCase {
}
}
private static void setRandomWaitForActiveShards(Consumer<Integer> setter, Map<String, String> expectedParams) {
if (randomBoolean()) {
int waitForActiveShards = randomIntBetween(0, 10);
setter.accept(waitForActiveShards);
expectedParams.put("wait_for_active_shards", String.valueOf(waitForActiveShards));
}
}
private static void setRandomRefreshPolicy(ReplicatedWriteRequest<?> request, Map<String, String> expectedParams) {
if (randomBoolean()) {
WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());

SearchIT.java

@@ -23,20 +23,30 @@ import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.NestedQueryBuilder;
import org.elasticsearch.index.query.ScriptQueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.join.aggregations.Children;
import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.range.Range;
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
@@ -45,10 +55,12 @@ import org.elasticsearch.search.aggregations.matrix.stats.MatrixStats;
import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
import org.hamcrest.Matchers;
import org.junit.Before;
import java.io.IOException;
@@ -64,6 +76,7 @@ import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.nullValue;
public class SearchIT extends ESRestHighLevelClientTestCase {
@@ -80,10 +93,24 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
StringEntity doc5 = new StringEntity("{\"type\":\"type2\", \"num\":100, \"num2\":10}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/index/type/5", Collections.emptyMap(), doc5);
client().performRequest("POST", "/index/_refresh");
StringEntity doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/index1/doc/1", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/index1/doc/2", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/index2/doc/3", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/index2/doc/4", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/index3/doc/5", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/index3/doc/6", Collections.emptyMap(), doc);
client().performRequest("POST", "/index1,index2,index3/_refresh");
}
public void testSearchNoQuery() throws IOException {
SearchRequest searchRequest = new SearchRequest();
SearchRequest searchRequest = new SearchRequest("index");
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
assertSearchHeader(searchResponse);
assertNull(searchResponse.getAggregations());
@@ -106,7 +133,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
}
public void testSearchMatchQuery() throws IOException {
SearchRequest searchRequest = new SearchRequest();
SearchRequest searchRequest = new SearchRequest("index");
searchRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10)));
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
assertSearchHeader(searchResponse);
@@ -164,7 +191,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertEquals(RestStatus.BAD_REQUEST, exception.status());
}
SearchRequest searchRequest = new SearchRequest();
SearchRequest searchRequest = new SearchRequest("index");
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.aggregation(new RangeAggregationBuilder("agg1").field("num")
.addRange("first", 0, 30).addRange("second", 31, 200));
@@ -193,7 +220,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
}
public void testSearchWithTermsAndRangeAgg() throws IOException {
SearchRequest searchRequest = new SearchRequest();
SearchRequest searchRequest = new SearchRequest("index");
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
TermsAggregationBuilder agg = new TermsAggregationBuilder("agg1", ValueType.STRING).field("type.keyword");
agg.subAggregation(new RangeAggregationBuilder("subagg").field("num")
@@ -247,7 +274,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
}
public void testSearchWithMatrixStats() throws IOException {
SearchRequest searchRequest = new SearchRequest();
SearchRequest searchRequest = new SearchRequest("index");
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.aggregation(new MatrixStatsAggregationBuilder("agg1").fields(Arrays.asList("num", "num2")));
searchSourceBuilder.size(0);
@@ -374,7 +401,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
}
public void testSearchWithSuggest() throws IOException {
SearchRequest searchRequest = new SearchRequest();
SearchRequest searchRequest = new SearchRequest("index");
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion("sugg1", new PhraseSuggestionBuilder("type"))
.setGlobalText("type"));
@@ -464,6 +491,185 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
}
}
public void testMultiSearch() throws Exception {
MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
SearchRequest searchRequest1 = new SearchRequest("index1");
searchRequest1.source().sort("_id", SortOrder.ASC);
multiSearchRequest.add(searchRequest1);
SearchRequest searchRequest2 = new SearchRequest("index2");
searchRequest2.source().sort("_id", SortOrder.ASC);
multiSearchRequest.add(searchRequest2);
SearchRequest searchRequest3 = new SearchRequest("index3");
searchRequest3.source().sort("_id", SortOrder.ASC);
multiSearchRequest.add(searchRequest3);
MultiSearchResponse multiSearchResponse =
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("1"));
assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(1).getId(), Matchers.equalTo("2"));
assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[1].getResponse());
assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("3"));
assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(1).getId(), Matchers.equalTo("4"));
assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[2].getResponse());
assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("5"));
assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(1).getId(), Matchers.equalTo("6"));
}
public void testMultiSearch_withAgg() throws Exception {
MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
SearchRequest searchRequest1 = new SearchRequest("index1");
searchRequest1.source().size(0).aggregation(new TermsAggregationBuilder("name", ValueType.STRING).field("field.keyword")
.order(BucketOrder.key(true)));
multiSearchRequest.add(searchRequest1);
SearchRequest searchRequest2 = new SearchRequest("index2");
searchRequest2.source().size(0).aggregation(new TermsAggregationBuilder("name", ValueType.STRING).field("field.keyword")
.order(BucketOrder.key(true)));
multiSearchRequest.add(searchRequest2);
SearchRequest searchRequest3 = new SearchRequest("index3");
searchRequest3.source().size(0).aggregation(new TermsAggregationBuilder("name", ValueType.STRING).field("field.keyword")
.order(BucketOrder.key(true)));
multiSearchRequest.add(searchRequest3);
MultiSearchResponse multiSearchResponse =
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getHits().length, Matchers.equalTo(0));
Terms terms = multiSearchResponse.getResponses()[0].getResponse().getAggregations().get("name");
assertThat(terms.getBuckets().size(), Matchers.equalTo(2));
assertThat(terms.getBuckets().get(0).getKeyAsString(), Matchers.equalTo("value1"));
assertThat(terms.getBuckets().get(1).getKeyAsString(), Matchers.equalTo("value2"));
assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[1].getResponse());
assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getHits().length, Matchers.equalTo(0));
terms = multiSearchResponse.getResponses()[1].getResponse().getAggregations().get("name");
assertThat(terms.getBuckets().size(), Matchers.equalTo(2));
assertThat(terms.getBuckets().get(0).getKeyAsString(), Matchers.equalTo("value1"));
assertThat(terms.getBuckets().get(1).getKeyAsString(), Matchers.equalTo("value2"));
assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[2].getResponse());
assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(2L));
assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getHits().length, Matchers.equalTo(0));
terms = multiSearchResponse.getResponses()[2].getResponse().getAggregations().get("name");
assertThat(terms.getBuckets().size(), Matchers.equalTo(2));
assertThat(terms.getBuckets().get(0).getKeyAsString(), Matchers.equalTo("value1"));
assertThat(terms.getBuckets().get(1).getKeyAsString(), Matchers.equalTo("value2"));
}
public void testMultiSearch_withQuery() throws Exception {
MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
SearchRequest searchRequest1 = new SearchRequest("index1");
searchRequest1.source().query(new TermsQueryBuilder("field", "value2"));
multiSearchRequest.add(searchRequest1);
SearchRequest searchRequest2 = new SearchRequest("index2");
searchRequest2.source().query(new TermsQueryBuilder("field", "value2"));
multiSearchRequest.add(searchRequest2);
SearchRequest searchRequest3 = new SearchRequest("index3");
searchRequest3.source().query(new TermsQueryBuilder("field", "value2"));
multiSearchRequest.add(searchRequest3);
MultiSearchResponse multiSearchResponse =
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("2"));
assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[1].getResponse());
assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("4"));
assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[2].getResponse());
assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("6"));
searchRequest1.source().highlighter(new HighlightBuilder().field("field"));
searchRequest2.source().highlighter(new HighlightBuilder().field("field"));
searchRequest3.source().highlighter(new HighlightBuilder().field("field"));
multiSearchResponse = execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
assertThat(multiSearchResponse.getResponses()[0].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[0].getResponse());
assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getAt(0).getHighlightFields()
.get("field").fragments()[0].string(), Matchers.equalTo("<em>value2</em>"));
assertThat(multiSearchResponse.getResponses()[1].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[1].getResponse());
assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("4"));
assertThat(multiSearchResponse.getResponses()[1].getResponse().getHits().getAt(0).getHighlightFields()
.get("field").fragments()[0].string(), Matchers.equalTo("<em>value2</em>"));
assertThat(multiSearchResponse.getResponses()[2].getFailure(), Matchers.nullValue());
assertThat(multiSearchResponse.getResponses()[2].isFailure(), Matchers.is(false));
SearchIT.assertSearchHeader(multiSearchResponse.getResponses()[2].getResponse());
assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getTotalHits(), Matchers.equalTo(1L));
assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getId(), Matchers.equalTo("6"));
assertThat(multiSearchResponse.getResponses()[2].getResponse().getHits().getAt(0).getHighlightFields()
.get("field").fragments()[0].string(), Matchers.equalTo("<em>value2</em>"));
}
public void testMultiSearch_failure() throws Exception {
MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
SearchRequest searchRequest1 = new SearchRequest("index1");
searchRequest1.source().query(new ScriptQueryBuilder(new Script(ScriptType.INLINE, "invalid", "code", Collections.emptyMap())));
multiSearchRequest.add(searchRequest1);
SearchRequest searchRequest2 = new SearchRequest("index2");
searchRequest2.source().query(new ScriptQueryBuilder(new Script(ScriptType.INLINE, "invalid", "code", Collections.emptyMap())));
multiSearchRequest.add(searchRequest2);
MultiSearchResponse multiSearchResponse =
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(2));
assertThat(multiSearchResponse.getResponses()[0].isFailure(), Matchers.is(true));
assertThat(multiSearchResponse.getResponses()[0].getFailure().getMessage(), containsString("search_phase_execution_exception"));
assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue());
assertThat(multiSearchResponse.getResponses()[1].isFailure(), Matchers.is(true));
assertThat(multiSearchResponse.getResponses()[1].getFailure().getMessage(), containsString("search_phase_execution_exception"));
assertThat(multiSearchResponse.getResponses()[1].getResponse(), nullValue());
}
private static void assertSearchHeader(SearchResponse searchResponse) {
assertThat(searchResponse.getTook().nanos(), greaterThanOrEqualTo(0L));
assertEquals(0, searchResponse.getFailedShards());

IndicesClientDocumentationIT.java

@@ -21,13 +21,18 @@ package org.elasticsearch.client.documentation;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
@@ -52,8 +57,8 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
RestHighLevelClient client = highLevelClient();
{
Response createIndexResponse = client().performRequest("PUT", "/posts");
assertEquals(200, createIndexResponse.getStatusLine().getStatusCode());
CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("posts"));
assertTrue(createIndexResponse.isAcknowledged());
}
{
@@ -61,14 +66,26 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
DeleteIndexRequest request = new DeleteIndexRequest("posts"); // <1>
// end::delete-index-request
// tag::delete-index-request-timeout
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
request.timeout("2m"); // <2>
// end::delete-index-request-timeout
// tag::delete-index-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::delete-index-request-masterTimeout
// tag::delete-index-request-indicesOptions
request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
// end::delete-index-request-indicesOptions
// tag::delete-index-execute
DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request);
// end::delete-index-execute
assertTrue(deleteIndexResponse.isAcknowledged());
// tag::delete-index-response
boolean acknowledged = deleteIndexResponse.isAcknowledged(); // <1>
// end::delete-index-response
assertTrue(acknowledged);
// tag::delete-index-execute-async
client.indices().deleteIndexAsync(request, new ActionListener<DeleteIndexResponse>() {
@@ -85,26 +102,11 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
// end::delete-index-execute-async
}
{
DeleteIndexRequest request = new DeleteIndexRequest("posts");
// tag::delete-index-request-timeout
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
request.timeout("2m"); // <2>
// end::delete-index-request-timeout
// tag::delete-index-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.timeout("1m"); // <2>
// end::delete-index-request-masterTimeout
// tag::delete-index-request-indicesOptions
request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
// end::delete-index-request-indicesOptions
}
{
// tag::delete-index-notfound
try {
DeleteIndexRequest request = new DeleteIndexRequest("does_not_exist");
DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request);
client.indices().deleteIndex(request);
} catch (ElasticsearchException exception) {
if (exception.status() == RestStatus.NOT_FOUND) {
// <1>
@@ -113,4 +115,79 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
// end::delete-index-notfound
}
}
public void testCreateIndex() throws IOException {
RestHighLevelClient client = highLevelClient();
{
// tag::create-index-request
CreateIndexRequest request = new CreateIndexRequest("twitter"); // <1>
// end::create-index-request
// tag::create-index-request-settings
request.settings(Settings.builder() // <1>
.put("index.number_of_shards", 3)
.put("index.number_of_replicas", 2)
);
// end::create-index-request-settings
// tag::create-index-request-mappings
request.mapping("tweet", // <1>
" {\n" +
" \"tweet\": {\n" +
" \"properties\": {\n" +
" \"message\": {\n" +
" \"type\": \"text\"\n" +
" }\n" +
" }\n" +
" }\n" +
" }", // <2>
XContentType.JSON);
// end::create-index-request-mappings
// tag::create-index-request-aliases
request.alias(
new Alias("twitter_alias") // <1>
);
// end::create-index-request-aliases
// tag::create-index-request-timeout
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
request.timeout("2m"); // <2>
// end::create-index-request-timeout
// tag::create-index-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::create-index-request-masterTimeout
// tag::create-index-request-waitForActiveShards
request.waitForActiveShards(2); // <1>
request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2>
// end::create-index-request-waitForActiveShards
// tag::create-index-execute
CreateIndexResponse createIndexResponse = client.indices().createIndex(request);
// end::create-index-execute
// tag::create-index-response
boolean acknowledged = createIndexResponse.isAcknowledged(); // <1>
boolean shardsAcked = createIndexResponse.isShardsAcked(); // <2>
// end::create-index-response
assertTrue(acknowledged);
assertTrue(shardsAcked);
// tag::create-index-execute-async
client.indices().createIndexAsync(request, new ActionListener<CreateIndexResponse>() {
@Override
public void onResponse(CreateIndexResponse createIndexResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
});
// end::create-index-execute-async
}
}
}
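A note on the `// tag::...`/`// end::...` markers above: they delimit snippets that the high-level client documentation includes verbatim, which keeps the published examples compiling. A sketch of how such a snippet is pulled into the asciidoc docs (the `{doc-tests}` attribute is assumed to point at this package):

    ["source","java",subs="attributes,callouts,macros"]
    --------------------------------------------------
    include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-request]
    --------------------------------------------------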

ElasticsearchException.java

@@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.transport.TcpTransport;
import java.io.IOException;
@@ -986,7 +987,10 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class,
org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2),
UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException.class,
org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0);
org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0),
TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class,
MultiBucketConsumerService.TooManyBucketsException::new, 149,
Version.V_7_0_0_alpha1);
final Class<? extends ElasticsearchException> exceptionClass;
final CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException> constructor;

Version.java

@@ -131,10 +131,13 @@ public class Version implements Comparable<Version> {
public static final int V_6_0_1_ID = 6000199;
public static final Version V_6_0_1 =
new Version(V_6_0_1_ID, org.apache.lucene.util.Version.LUCENE_7_0_1);
public static final int V_6_0_2_ID = 6000299;
public static final Version V_6_0_2 =
new Version(V_6_0_2_ID, org.apache.lucene.util.Version.LUCENE_7_0_1);
public static final int V_6_1_0_ID = 6010099;
public static final Version V_6_1_0 = new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
public static final int V_6_2_0_ID = 6020099;
public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_0);
public static final int V_7_0_0_alpha1_ID = 7000001;
public static final Version V_7_0_0_alpha1 =
new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_0);
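As a reading aid, the numeric IDs above pack the version into decimal digit pairs; this decoding is inferred from the constants themselves rather than stated in this diff (99 appears to mark a released build, lower values a pre-release such as alpha1):

    int id = 6000299;                 // V_6_0_2_ID
    int major = id / 1000000;         // 6
    int minor = (id / 10000) % 100;   // 0
    int revision = (id / 100) % 100;  // 2
    int build = id % 100;             // 99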
@@ -157,6 +160,8 @@
return V_6_1_0;
case V_6_2_0_ID:
return V_6_2_0;
case V_6_0_2_ID:
return V_6_0_2;
case V_6_0_1_ID:
return V_6_0_1;
case V_6_0_0_ID:

Alias.java

@@ -21,10 +21,13 @@ package org.elasticsearch.action.admin.indices.alias;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -33,11 +36,17 @@ import org.elasticsearch.index.query.QueryBuilder;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
/**
* Represents an alias, to be associated with an index
*/
public class Alias implements Streamable {
public class Alias implements Streamable, ToXContentObject {
private static final ParseField FILTER = new ParseField("filter");
private static final ParseField ROUTING = new ParseField("routing");
private static final ParseField INDEX_ROUTING = new ParseField("index_routing", "indexRouting", "index-routing");
private static final ParseField SEARCH_ROUTING = new ParseField("search_routing", "searchRouting", "search-routing");
private String name;
@@ -196,16 +205,16 @@ public class Alias implements Streamable {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("filter".equals(currentFieldName)) {
if (FILTER.match(currentFieldName)) {
Map<String, Object> filter = parser.mapOrdered();
alias.filter(filter);
}
} else if (token == XContentParser.Token.VALUE_STRING) {
if ("routing".equals(currentFieldName)) {
if (ROUTING.match(currentFieldName)) {
alias.routing(parser.text());
} else if ("index_routing".equals(currentFieldName) || "indexRouting".equals(currentFieldName) || "index-routing".equals(currentFieldName)) {
} else if (INDEX_ROUTING.match(currentFieldName)) {
alias.indexRouting(parser.text());
} else if ("search_routing".equals(currentFieldName) || "searchRouting".equals(currentFieldName) || "search-routing".equals(currentFieldName)) {
} else if (SEARCH_ROUTING.match(currentFieldName)) {
alias.searchRouting(parser.text());
}
}
@@ -213,6 +222,29 @@
return alias;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
if (filter != null) {
builder.rawField(FILTER.getPreferredName(), new BytesArray(filter), XContentType.JSON);
}
if (indexRouting != null && indexRouting.equals(searchRouting)) {
builder.field(ROUTING.getPreferredName(), indexRouting);
} else {
if (indexRouting != null) {
builder.field(INDEX_ROUTING.getPreferredName(), indexRouting);
}
if (searchRouting != null) {
builder.field(SEARCH_ROUTING.getPreferredName(), searchRouting);
}
}
builder.endObject();
return builder;
}
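For illustration, a sketch of what this method emits inside the enclosing `aliases` object, using the filter and routing values from the new integration test (since `routing(String)` sets both routings, the equal-routing branch applies here):

    Alias alias = new Alias("alias_name")
        .filter("{\"term\":{\"year\":2016}}")
        .routing("1");
    // rendered as: "alias_name": { "filter": { "term": { "year": 2016 } }, "routing": "1" }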
@Override
public boolean equals(Object o) {
if (this == o) return true;

CreateIndexRequest.java

@@ -30,6 +30,7 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.MapBuilder;
@@ -37,6 +38,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
@@ -65,7 +67,11 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
* @see org.elasticsearch.client.Requests#createIndexRequest(String)
* @see CreateIndexResponse
*/
public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest> implements IndicesRequest {
public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest> implements IndicesRequest, ToXContentObject {
private static final ParseField MAPPINGS = new ParseField("mappings");
private static final ParseField SETTINGS = new ParseField("settings");
private static final ParseField ALIASES = new ParseField("aliases");
private String cause = "";
@@ -376,14 +382,14 @@
public CreateIndexRequest source(Map<String, ?> source) {
for (Map.Entry<String, ?> entry : source.entrySet()) {
String name = entry.getKey();
if (name.equals("settings")) {
if (SETTINGS.match(name)) {
settings((Map<String, Object>) entry.getValue());
} else if (name.equals("mappings")) {
} else if (MAPPINGS.match(name)) {
Map<String, Object> mappings = (Map<String, Object>) entry.getValue();
for (Map.Entry<String, Object> entry1 : mappings.entrySet()) {
mapping(entry1.getKey(), (Map<String, Object>) entry1.getValue());
}
} else if (name.equals("aliases")) {
} else if (ALIASES.match(name)) {
aliases((Map<String, Object>) entry.getValue());
} else {
// maybe custom?
@ -520,4 +526,32 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
out.writeBoolean(updateAllTypes);
waitForActiveShards.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.startObject(SETTINGS.getPreferredName());
settings.toXContent(builder, params);
builder.endObject();
builder.startObject(MAPPINGS.getPreferredName());
for (Map.Entry<String, String> entry : mappings.entrySet()) {
builder.rawField(entry.getKey(), new BytesArray(entry.getValue()), XContentType.JSON);
}
builder.endObject();
builder.startObject(ALIASES.getPreferredName());
for (Alias alias : aliases) {
alias.toXContent(builder, params);
}
builder.endObject();
for (Map.Entry<String, IndexMetaData.Custom> entry : customs.entrySet()) {
builder.field(entry.getKey(), entry.getValue(), params);
}
builder.endObject();
return builder;
}
}
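Because CreateIndexRequest now implements ToXContentObject, a request can be rendered back to JSON. A hedged sketch; the fluent setters are assumed from the existing request API, and XContentHelper.toXContent is the same helper used elsewhere in this change:

    CreateIndexRequest request = new CreateIndexRequest("twitter")
            .settings(Settings.builder().put("index.number_of_shards", 3).build())
            .alias(new Alias("tweets").routing("1"));
    // settings, mappings, aliases and any custom metadata are emitted as one JSON object
    BytesReference bytes = XContentHelper.toXContent(request, XContentType.JSON, ToXContent.EMPTY_PARAMS, false);
    String json = bytes.utf8ToString();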

View File

@ -39,20 +39,17 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru
*/
public class CreateIndexResponse extends AcknowledgedResponse implements ToXContentObject {
private static final String SHARDS_ACKNOWLEDGED = "shards_acknowledged";
private static final String INDEX = "index";
private static final ParseField SHARDS_ACKNOWLEDGED_PARSER = new ParseField(SHARDS_ACKNOWLEDGED);
private static final ParseField INDEX_PARSER = new ParseField(INDEX);
private static final ParseField SHARDS_ACKNOWLEDGED = new ParseField("shards_acknowledged");
private static final ParseField INDEX = new ParseField("index");
private static final ConstructingObjectParser<CreateIndexResponse, Void> PARSER = new ConstructingObjectParser<>("create_index",
true, args -> new CreateIndexResponse((boolean) args[0], (boolean) args[1], (String) args[2]));
static {
declareAcknowledgedField(PARSER);
PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED_PARSER,
PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED,
ObjectParser.ValueType.BOOLEAN);
PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), INDEX_PARSER, ObjectParser.ValueType.STRING);
PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), INDEX, ObjectParser.ValueType.STRING);
}
private boolean shardsAcked;
@ -102,8 +99,8 @@ public class CreateIndexResponse extends AcknowledgedResponse implements ToXCont
}
public void addCustomFields(XContentBuilder builder) throws IOException {
builder.field(SHARDS_ACKNOWLEDGED, isShardsAcked());
builder.field(INDEX, index());
builder.field(SHARDS_ACKNOWLEDGED.getPreferredName(), isShardsAcked());
builder.field(INDEX.getPreferredName(), index());
}
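A hedged sketch of driving the parser above from inside the class: the lenient ConstructingObjectParser turns a raw REST response body straight into a CreateIndexResponse (the createParser signature is assumed from this era of the codebase):

    String body = "{\"acknowledged\":true,\"shards_acknowledged\":true,\"index\":\"test\"}";
    try (XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, body)) {
        CreateIndexResponse response = PARSER.apply(parser, null);
    }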
@Override

View File

@ -36,9 +36,11 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.List;
/**
@ -46,10 +48,15 @@ import java.util.List;
*/
public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndexRequest, GetIndexResponse> {
private final IndicesService indicesService;
@Inject
public TransportGetIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, GetIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexRequest::new, indexNameExpressionResolver);
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService) {
super(settings, GetIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexRequest::new,
indexNameExpressionResolver);
this.indicesService = indicesService;
}
@Override
@ -60,7 +67,8 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
@Override
protected ClusterBlockException checkBlock(GetIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ,
indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
@ -82,8 +90,14 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
switch (feature) {
case MAPPINGS:
if (!doneMappings) {
mappingsResult = state.metaData().findMappings(concreteIndices, request.types());
try {
mappingsResult = state.metaData().findMappings(concreteIndices, request.types(),
indicesService.getFieldFilter());
doneMappings = true;
} catch (IOException e) {
listener.onFailure(e);
return;
}
}
break;
case ALIASES:

View File

@ -29,13 +29,12 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentFieldMappers;
@ -50,7 +49,10 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import static java.util.Collections.singletonMap;
@ -69,7 +71,8 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
public TransportGetFieldMappingsIndexAction(Settings settings, ClusterService clusterService, TransportService transportService,
IndicesService indicesService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, GetFieldMappingsIndexRequest::new, ThreadPool.Names.MANAGEMENT);
super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
GetFieldMappingsIndexRequest::new, ThreadPool.Names.MANAGEMENT);
this.clusterService = clusterService;
this.indicesService = indicesService;
}
@ -90,6 +93,9 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) {
assert shardId != null;
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
Predicate<String> metadataFieldPredicate = indicesService::isMetaDataField;
Predicate<String> fieldPredicate = metadataFieldPredicate.or(indicesService.getFieldFilter().apply(shardId.getIndexName()));
Collection<String> typeIntersection;
if (request.types().length == 0) {
typeIntersection = indexService.mapperService().types();
@ -104,16 +110,15 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
}
}
MapBuilder<String, Map<String, FieldMappingMetaData>> typeMappings = new MapBuilder<>();
Map<String, Map<String, FieldMappingMetaData>> typeMappings = new HashMap<>();
for (String type : typeIntersection) {
DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
Map<String, FieldMappingMetaData> fieldMapping = findFieldMappingsByType(documentMapper, request);
Map<String, FieldMappingMetaData> fieldMapping = findFieldMappingsByType(fieldPredicate, documentMapper, request);
if (!fieldMapping.isEmpty()) {
typeMappings.put(type, fieldMapping);
}
}
return new GetFieldMappingsResponse(singletonMap(shardId.getIndexName(), typeMappings.immutableMap()));
return new GetFieldMappingsResponse(singletonMap(shardId.getIndexName(), Collections.unmodifiableMap(typeMappings)));
}
@Override
@ -163,47 +168,50 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
}
};
private Map<String, FieldMappingMetaData> findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) {
MapBuilder<String, FieldMappingMetaData> fieldMappings = new MapBuilder<>();
private static Map<String, FieldMappingMetaData> findFieldMappingsByType(Predicate<String> fieldPredicate,
DocumentMapper documentMapper,
GetFieldMappingsIndexRequest request) {
Map<String, FieldMappingMetaData> fieldMappings = new HashMap<>();
final DocumentFieldMappers allFieldMappers = documentMapper.mappers();
for (String field : request.fields()) {
if (Regex.isMatchAllPattern(field)) {
for (FieldMapper fieldMapper : allFieldMappers) {
addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
addFieldMapper(fieldPredicate, fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
}
} else if (Regex.isSimpleMatchPattern(field)) {
for (FieldMapper fieldMapper : allFieldMappers) {
if (Regex.simpleMatch(field, fieldMapper.fieldType().name())) {
addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings,
request.includeDefaults());
addFieldMapper(fieldPredicate, fieldMapper.fieldType().name(),
fieldMapper, fieldMappings, request.includeDefaults());
}
}
} else {
// not a pattern
FieldMapper fieldMapper = allFieldMappers.smartNameFieldMapper(field);
if (fieldMapper != null) {
addFieldMapper(field, fieldMapper, fieldMappings, request.includeDefaults());
addFieldMapper(fieldPredicate, field, fieldMapper, fieldMappings, request.includeDefaults());
} else if (request.probablySingleFieldRequest()) {
fieldMappings.put(field, FieldMappingMetaData.NULL);
}
}
}
return fieldMappings.immutableMap();
return Collections.unmodifiableMap(fieldMappings);
}
private void addFieldMapper(String field, FieldMapper fieldMapper, MapBuilder<String, FieldMappingMetaData> fieldMappings, boolean includeDefaults) {
private static void addFieldMapper(Predicate<String> fieldPredicate,
String field, FieldMapper fieldMapper, Map<String, FieldMappingMetaData> fieldMappings,
boolean includeDefaults) {
if (fieldMappings.containsKey(field)) {
return;
}
if (fieldPredicate.test(field)) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.startObject();
fieldMapper.toXContent(builder, includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS);
builder.endObject();
fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.fieldType().name(), builder.bytes()));
BytesReference bytes = XContentHelper.toXContent(fieldMapper, XContentType.JSON,
includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS, false);
fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.fieldType().name(), bytes));
} catch (IOException e) {
throw new ElasticsearchException("failed to serialize XContent of field [" + field + "]", e);
}
}
}
}

View File

@ -31,15 +31,23 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMappingsRequest, GetMappingsResponse> {
private final IndicesService indicesService;
@Inject
public TransportGetMappingsAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, GetMappingsAction.NAME, transportService, clusterService, threadPool, actionFilters, GetMappingsRequest::new, indexNameExpressionResolver);
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService) {
super(settings, GetMappingsAction.NAME, transportService, clusterService, threadPool, actionFilters, GetMappingsRequest::new,
indexNameExpressionResolver);
this.indicesService = indicesService;
}
@Override
@ -50,7 +58,8 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMa
@Override
protected ClusterBlockException checkBlock(GetMappingsRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ,
indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
@ -59,11 +68,15 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMa
}
@Override
protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetMappingsResponse> listener) {
protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state,
final ActionListener<GetMappingsResponse> listener) {
logger.trace("serving getMapping request based on version {}", state.version());
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result = state.metaData().findMappings(
concreteIndices, request.types()
);
try {
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result =
state.metaData().findMappings(concreteIndices, request.types(), indicesService.getFieldFilter());
listener.onResponse(new GetMappingsResponse(result));
} catch (IOException e) {
listener.onFailure(e);
}
}
}

View File

@ -40,6 +40,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
public class TransportFieldCapabilitiesIndexAction extends TransportSingleShardAction<FieldCapabilitiesIndexRequest,
FieldCapabilitiesIndexResponse> {
@ -77,14 +78,17 @@ public class TransportFieldCapabilitiesIndexAction extends TransportSingleShardA
for (String field : request.fields()) {
fieldNames.addAll(mapperService.simpleMatchToIndexNames(field));
}
Predicate<String> fieldPredicate = indicesService.getFieldFilter().apply(shardId.getIndexName());
Map<String, FieldCapabilities> responseMap = new HashMap<>();
for (String field : fieldNames) {
MappedFieldType ft = mapperService.fullName(field);
if (ft != null) {
FieldCapabilities fieldCap = new FieldCapabilities(field, ft.typeName(), ft.isSearchable(), ft.isAggregatable());
if (indicesService.isMetaDataField(field) || fieldPredicate.test(field)) {
responseMap.put(field, fieldCap);
}
}
}
return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), responseMap);
}

View File

@ -23,20 +23,36 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue;
/**
* A multi search API request.
*/
public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest {
public static final int MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT = 0;
private int maxConcurrentSearchRequests = 0;
private List<SearchRequest> requests = new ArrayList<>();
@ -131,4 +147,171 @@ public class MultiSearchRequest extends ActionRequest implements CompositeIndice
request.writeTo(out);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MultiSearchRequest that = (MultiSearchRequest) o;
return maxConcurrentSearchRequests == that.maxConcurrentSearchRequests &&
Objects.equals(requests, that.requests) &&
Objects.equals(indicesOptions, that.indicesOptions);
}
@Override
public int hashCode() {
return Objects.hash(maxConcurrentSearchRequests, requests, indicesOptions);
}
public static void readMultiLineFormat(BytesReference data,
XContent xContent,
CheckedBiConsumer<SearchRequest, XContentParser, IOException> consumer,
String[] indices,
IndicesOptions indicesOptions,
String[] types,
String routing,
String searchType,
NamedXContentRegistry registry,
boolean allowExplicitIndex) throws IOException {
int from = 0;
int length = data.length();
byte marker = xContent.streamSeparator();
while (true) {
int nextMarker = findNextMarker(marker, from, data, length);
if (nextMarker == -1) {
break;
}
// tolerate an empty first line (a bare \n before the first header)
if (nextMarker == 0) {
from = nextMarker + 1;
continue;
}
SearchRequest searchRequest = new SearchRequest();
if (indices != null) {
searchRequest.indices(indices);
}
if (indicesOptions != null) {
searchRequest.indicesOptions(indicesOptions);
}
if (types != null && types.length > 0) {
searchRequest.types(types);
}
if (routing != null) {
searchRequest.routing(routing);
}
if (searchType != null) {
searchRequest.searchType(searchType);
}
IndicesOptions defaultOptions = SearchRequest.DEFAULT_INDICES_OPTIONS;
// now parse the action
if (nextMarker - from > 0) {
try (XContentParser parser = xContent.createParser(registry, data.slice(from, nextMarker - from))) {
Map<String, Object> source = parser.map();
for (Map.Entry<String, Object> entry : source.entrySet()) {
Object value = entry.getValue();
if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) {
if (!allowExplicitIndex) {
throw new IllegalArgumentException("explicit index in multi search is not allowed");
}
searchRequest.indices(nodeStringArrayValue(value));
} else if ("type".equals(entry.getKey()) || "types".equals(entry.getKey())) {
searchRequest.types(nodeStringArrayValue(value));
} else if ("search_type".equals(entry.getKey()) || "searchType".equals(entry.getKey())) {
searchRequest.searchType(nodeStringValue(value, null));
} else if ("request_cache".equals(entry.getKey()) || "requestCache".equals(entry.getKey())) {
searchRequest.requestCache(nodeBooleanValue(value, entry.getKey()));
} else if ("preference".equals(entry.getKey())) {
searchRequest.preference(nodeStringValue(value, null));
} else if ("routing".equals(entry.getKey())) {
searchRequest.routing(nodeStringValue(value, null));
}
}
defaultOptions = IndicesOptions.fromMap(source, defaultOptions);
}
}
searchRequest.indicesOptions(defaultOptions);
// move pointers
from = nextMarker + 1;
// now for the body
nextMarker = findNextMarker(marker, from, data, length);
if (nextMarker == -1) {
break;
}
BytesReference bytes = data.slice(from, nextMarker - from);
try (XContentParser parser = xContent.createParser(registry, bytes)) {
consumer.accept(searchRequest, parser);
}
// move pointers
from = nextMarker + 1;
}
}
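For reference, a hedged sketch of the multi-line format this method consumes: header and body objects alternate, each line terminated by the xContent stream separator (a newline for JSON), including the final line. The header keys handled above ("index", "search_type", "request_cache" and so on) live on the odd lines:

    String msearch =
            "{\"index\": \"twitter\"}\n" +
            "{\"query\": {\"match_all\": {}}}\n" +
            "{\"index\": \"logs\", \"search_type\": \"dfs_query_then_fetch\", \"request_cache\": true}\n" +
            "{\"query\": {\"term\": {\"level\": \"error\"}}}\n";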
private static int findNextMarker(byte marker, int from, BytesReference data, int length) {
for (int i = from; i < length; i++) {
if (data.get(i) == marker) {
return i;
}
}
if (from != length) {
throw new IllegalArgumentException("The msearch request must be terminated by a newline [\n]");
}
return -1;
}
public static byte[] writeMultiLineFormat(MultiSearchRequest multiSearchRequest, XContent xContent) throws IOException {
ByteArrayOutputStream output = new ByteArrayOutputStream();
for (SearchRequest request : multiSearchRequest.requests()) {
try (XContentBuilder xContentBuilder = XContentBuilder.builder(xContent)) {
xContentBuilder.startObject();
if (request.indices() != null) {
xContentBuilder.field("index", request.indices());
}
if (request.indicesOptions() != null && request.indicesOptions() != SearchRequest.DEFAULT_INDICES_OPTIONS) {
if (request.indicesOptions().expandWildcardsOpen() && request.indicesOptions().expandWildcardsClosed()) {
xContentBuilder.field("expand_wildcards", "all");
} else if (request.indicesOptions().expandWildcardsOpen()) {
xContentBuilder.field("expand_wildcards", "open");
} else if (request.indicesOptions().expandWildcardsClosed()) {
xContentBuilder.field("expand_wildcards", "closed");
} else {
xContentBuilder.field("expand_wildcards", "none");
}
xContentBuilder.field("ignore_unavailable", request.indicesOptions().ignoreUnavailable());
xContentBuilder.field("allow_no_indices", request.indicesOptions().allowNoIndices());
}
if (request.types() != null) {
xContentBuilder.field("types", request.types());
}
if (request.searchType() != null) {
xContentBuilder.field("search_type", request.searchType().name().toLowerCase(Locale.ROOT));
}
if (request.requestCache() != null) {
xContentBuilder.field("request_cache", request.requestCache());
}
if (request.preference() != null) {
xContentBuilder.field("preference", request.preference());
}
if (request.routing() != null) {
xContentBuilder.field("routing", request.routing());
}
xContentBuilder.endObject();
xContentBuilder.bytes().writeTo(output);
}
output.write(xContent.streamSeparator());
try (XContentBuilder xContentBuilder = XContentBuilder.builder(xContent)) {
if (request.source() != null) {
request.source().toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
} else {
xContentBuilder.startObject();
xContentBuilder.endObject();
}
xContentBuilder.bytes().writeTo(output);
}
output.write(xContent.streamSeparator());
}
return output.toByteArray();
}
}

View File

@ -24,23 +24,39 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
/**
* A multi search response.
*/
public class MultiSearchResponse extends ActionResponse implements Iterable<MultiSearchResponse.Item>, ToXContentObject {
private static final ParseField RESPONSES = new ParseField(Fields.RESPONSES);
private static final ParseField TOOK_IN_MILLIS = new ParseField("took");
private static final ConstructingObjectParser<MultiSearchResponse, Void> PARSER = new ConstructingObjectParser<>("multi_search",
true, a -> new MultiSearchResponse(((List<Item>)a[0]).toArray(new Item[0]), (long) a[1]));
static {
PARSER.declareObjectArray(constructorArg(), (p, c) -> itemFromXContent(p), RESPONSES);
PARSER.declareLong(constructorArg(), TOOK_IN_MILLIS);
}
/**
* A search response item, holding the actual search response, or an error message if it failed.
*/
@ -188,6 +204,45 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult
return builder;
}
public static MultiSearchResponse fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
private static MultiSearchResponse.Item itemFromXContent(XContentParser parser) throws IOException {
// This parsing logic is tricky because the multi search response format itself is tricky:
// 1) The json objects inside the responses array are either a search response or a serialized exception
// 2) Each response json object gets a status field injected that ElasticsearchException.failureFromXContent(...) does not parse,
//    but SearchResponse.innerFromXContent(...) parses and then ignores. The status field is not needed to parse
//    the response item. However, in both cases this method does need to consume the 'status' field, otherwise parsing
//    of the response item in the next json array element will fail.
Item item = null;
String fieldName = null;
Token token = parser.nextToken();
assert token == Token.FIELD_NAME;
outer: for (; token != Token.END_OBJECT; token = parser.nextToken()) {
switch (token) {
case FIELD_NAME:
fieldName = parser.currentName();
if ("error".equals(fieldName)) {
item = new Item(null, ElasticsearchException.failureFromXContent(parser));
} else if ("status".equals(fieldName) == false) {
item = new Item(SearchResponse.innerFromXContent(parser), null);
break outer;
}
break;
case VALUE_NUMBER:
if ("status".equals(fieldName)) {
// Ignore the status value
}
break;
}
}
assert parser.currentToken() == Token.END_OBJECT;
return item;
}
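A hedged illustration of the two item shapes that comment describes; both carry the injected "status" field that has to be consumed either way:

    // a regular search response item (shape only, not a complete response)
    String ok  = "{\"took\":3,\"timed_out\":false,\"status\":200}";
    // a failed item: a serialized exception under "error" plus the same injected status
    String err = "{\"error\":{\"type\":\"index_not_found_exception\",\"reason\":\"no such index\"},\"status\":404}";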
static final class Fields {
static final String RESPONSES = "responses";
static final String STATUS = "status";

View File

@ -65,6 +65,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.IntFunction;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
@ -73,13 +74,16 @@ public final class SearchPhaseController extends AbstractComponent {
private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];
private final BigArrays bigArrays;
private final ScriptService scriptService;
private final Function<Boolean, ReduceContext> reduceContextFunction;
public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService) {
/**
* Constructor.
* @param settings Node settings
* @param reduceContextFunction A function that builds the {@link ReduceContext} used to reduce {@link InternalAggregation}s
*/
public SearchPhaseController(Settings settings, Function<Boolean, ReduceContext> reduceContextFunction) {
super(settings);
this.bigArrays = bigArrays;
this.scriptService = scriptService;
this.reduceContextFunction = reduceContextFunction;
}
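A hedged sketch of how a node wires the new constructor: instead of holding BigArrays and ScriptService itself, the controller is handed a factory for reduce contexts (the ReduceContext arguments are taken from the removed lines below; settings, bigArrays and scriptService come from the node):

    SearchPhaseController controller = new SearchPhaseController(settings,
            isFinalReduce -> new InternalAggregation.ReduceContext(bigArrays, scriptService, isFinalReduce));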
public AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) {
@ -496,7 +500,7 @@ public final class SearchPhaseController extends AbstractComponent {
}
}
final Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions));
ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, true);
ReduceContext reduceContext = reduceContextFunction.apply(true);
final InternalAggregations aggregations = aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
firstResult.pipelineAggregators(), reduceContext);
final SearchProfileShardResults shardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults);
@ -513,7 +517,7 @@ public final class SearchPhaseController extends AbstractComponent {
* that relevant for the final reduce step. For final reduce see {@link #reduceAggs(List, List, ReduceContext)}
*/
private InternalAggregations reduceAggsIncrementally(List<InternalAggregations> aggregationsList) {
ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, false);
ReduceContext reduceContext = reduceContextFunction.apply(false);
return aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
null, reduceContext);
}

View File

@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestActions;
import org.elasticsearch.search.SearchHits;
@ -242,9 +243,14 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
}
public static SearchResponse fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
XContentParser.Token token;
String currentFieldName = null;
ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
parser.nextToken();
return innerFromXContent(parser);
}
static SearchResponse innerFromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
String currentFieldName = parser.currentName();
SearchHits hits = null;
Aggregations aggs = null;
Suggest suggest = null;
@ -259,8 +265,8 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
String scrollId = null;
List<ShardSearchFailure> failures = new ArrayList<>();
Clusters clusters = Clusters.EMPTY;
while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
if (token == Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (SCROLL_ID.match(currentFieldName)) {
@ -276,7 +282,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
} else {
parser.skipChildren();
}
} else if (token == XContentParser.Token.START_OBJECT) {
} else if (token == Token.START_OBJECT) {
if (SearchHits.Fields.HITS.equals(currentFieldName)) {
hits = SearchHits.fromXContent(parser);
} else if (Aggregations.AGGREGATIONS_FIELD.equals(currentFieldName)) {
@ -286,8 +292,8 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
} else if (SearchProfileShardResults.PROFILE_FIELD.equals(currentFieldName)) {
profile = SearchProfileShardResults.fromXContent(parser);
} else if (RestActions._SHARDS_FIELD.match(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
while ((token = parser.nextToken()) != Token.END_OBJECT) {
if (token == Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (RestActions.FAILED_FIELD.match(currentFieldName)) {
@ -301,9 +307,9 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
} else {
parser.skipChildren();
}
} else if (token == XContentParser.Token.START_ARRAY) {
} else if (token == Token.START_ARRAY) {
if (RestActions.FAILURES_FIELD.match(currentFieldName)) {
while((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
while((token = parser.nextToken()) != Token.END_ARRAY) {
failures.add(ShardSearchFailure.fromXContent(parser));
}
} else {

View File

@ -76,7 +76,7 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
int maxConcurrentSearches = request.maxConcurrentSearchRequests();
if (maxConcurrentSearches == 0) {
if (maxConcurrentSearches == MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) {
maxConcurrentSearches = defaultMaxConcurrentSearches(availableProcessors, clusterState);
}

View File

@ -37,11 +37,10 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru
*/
public abstract class AcknowledgedResponse extends ActionResponse {
private static final String ACKNOWLEDGED = "acknowledged";
private static final ParseField ACKNOWLEDGED_PARSER = new ParseField(ACKNOWLEDGED);
private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged");
protected static <T extends AcknowledgedResponse> void declareAcknowledgedField(ConstructingObjectParser<T, Void> PARSER) {
PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), ACKNOWLEDGED_PARSER,
PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), ACKNOWLEDGED,
ObjectParser.ValueType.BOOLEAN);
}
@ -78,6 +77,6 @@ public abstract class AcknowledgedResponse extends ActionResponse {
}
protected void addAcknowledgedField(XContentBuilder builder) throws IOException {
builder.field(ACKNOWLEDGED, isAcknowledged());
builder.field(ACKNOWLEDGED.getPreferredName(), isAcknowledged());
}
}

View File

@ -93,6 +93,9 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
ImmutableOpenMap<String, CompressedXContent> mappings,
ImmutableOpenMap<String, AliasMetaData> aliases,
ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
if (patterns == null || patterns.isEmpty()) {
throw new IllegalArgumentException("Index patterns must not be null or empty; got " + patterns);
}
this.name = name;
this.order = order;
this.version = version;
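With this guard, a template without index patterns now fails at construction time instead of producing an empty pattern on the wire. A hedged sketch, with the builder API assumed from the surrounding codebase:

    IndexTemplateMetaData.builder("template_1")
            .patterns(Collections.singletonList("te*")).build();   // ok
    IndexTemplateMetaData.builder("template_1")
            .patterns(Collections.emptyList()).build();            // throws IllegalArgumentException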
@ -244,7 +247,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
out.writeStringList(patterns);
} else {
out.writeString(patterns.size() > 0 ? patterns.get(0) : "");
out.writeString(patterns.get(0));
}
Settings.writeSettingsToStream(settings, out);
out.writeVInt(mappings.size());

View File

@ -107,15 +107,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
initMappers(withoutType);
}
private MappingMetaData() {
this.type = "";
try {
this.source = new CompressedXContent("{}");
} catch (IOException ex) {
throw new IllegalStateException("Cannot create MappingMetaData prototype", ex);
}
}
private void initMappers(Map<String, Object> withoutType) {
if (withoutType.containsKey("_routing")) {
boolean required = false;
@ -143,13 +134,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
}
}
public MappingMetaData(String type, CompressedXContent source, Routing routing, boolean hasParentField) {
this.type = type;
this.source = source;
this.routing = routing;
this.hasParentField = hasParentField;
}
void updateDefaultMapping(MappingMetaData defaultMapping) {
if (routing == Routing.EMPTY) {
routing = defaultMapping.routing();
@ -250,5 +234,4 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
public static Diff<MappingMetaData> readDiffFrom(StreamInput in) throws IOException {
return readDiffFrom(MappingMetaData::new, in);
}
}

View File

@ -48,11 +48,13 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
@ -69,6 +71,8 @@ import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.function.Predicate;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
@ -324,32 +328,38 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
return false;
}
/*
* Finds all mappings for types and concrete indices. Types are expanded to
* include all types that match the glob patterns in the types array. Empty
* types array, null or {"_all"} will be expanded to all types available for
* the given indices.
/**
* Finds all mappings for types and concrete indices. Types are expanded to include all types that match the glob
* patterns in the types array. Empty types array, null or {"_all"} will be expanded to all types available for
* the given indices. Only fields that match the provided field filter will be returned (default is a predicate
* that always returns true, which can be overridden via plugins).
*
* @see MapperPlugin#getFieldFilter()
*/
public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> findMappings(String[] concreteIndices, final String[] types) {
public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> findMappings(String[] concreteIndices,
final String[] types,
Function<String, Predicate<String>> fieldFilter)
throws IOException {
assert types != null;
assert concreteIndices != null;
if (concreteIndices.length == 0) {
return ImmutableOpenMap.of();
}
boolean isAllTypes = isAllTypes(types);
ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> indexMapBuilder = ImmutableOpenMap.builder();
Iterable<String> intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
for (String index : intersection) {
IndexMetaData indexMetaData = indices.get(index);
ImmutableOpenMap.Builder<String, MappingMetaData> filteredMappings;
if (isAllTypes(types)) {
indexMapBuilder.put(index, indexMetaData.getMappings()); // No types specified means get it all
Predicate<String> fieldPredicate = fieldFilter.apply(index);
if (isAllTypes) {
indexMapBuilder.put(index, filterFields(indexMetaData.getMappings(), fieldPredicate));
} else {
filteredMappings = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<String, MappingMetaData> filteredMappings = ImmutableOpenMap.builder();
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
if (Regex.simpleMatch(types, cursor.key)) {
filteredMappings.put(cursor.key, cursor.value);
filteredMappings.put(cursor.key, filterFields(cursor.value, fieldPredicate));
}
}
if (!filteredMappings.isEmpty()) {
@ -360,6 +370,95 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
return indexMapBuilder.build();
}
private static ImmutableOpenMap<String, MappingMetaData> filterFields(ImmutableOpenMap<String, MappingMetaData> mappings,
Predicate<String> fieldPredicate) throws IOException {
if (fieldPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) {
return mappings;
}
ImmutableOpenMap.Builder<String, MappingMetaData> builder = ImmutableOpenMap.builder(mappings.size());
for (ObjectObjectCursor<String, MappingMetaData> cursor : mappings) {
builder.put(cursor.key, filterFields(cursor.value, fieldPredicate));
}
return builder.build(); // all types, each with its fields filtered
}
@SuppressWarnings("unchecked")
private static MappingMetaData filterFields(MappingMetaData mappingMetaData, Predicate<String> fieldPredicate) throws IOException {
if (fieldPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) {
return mappingMetaData;
}
Map<String, Object> sourceAsMap = XContentHelper.convertToMap(mappingMetaData.source().compressedReference(), true).v2();
Map<String, Object> mapping;
if (sourceAsMap.size() == 1 && sourceAsMap.containsKey(mappingMetaData.type())) {
mapping = (Map<String, Object>) sourceAsMap.get(mappingMetaData.type());
} else {
mapping = sourceAsMap;
}
Map<String, Object> properties = (Map<String, Object>)mapping.get("properties");
if (properties == null || properties.isEmpty()) {
return mappingMetaData;
}
filterFields("", properties, fieldPredicate);
return new MappingMetaData(mappingMetaData.type(), sourceAsMap);
}
@SuppressWarnings("unchecked")
private static boolean filterFields(String currentPath, Map<String, Object> fields, Predicate<String> fieldPredicate) {
assert fieldPredicate != MapperPlugin.NOOP_FIELD_PREDICATE;
Iterator<Map.Entry<String, Object>> entryIterator = fields.entrySet().iterator();
while (entryIterator.hasNext()) {
Map.Entry<String, Object> entry = entryIterator.next();
String newPath = mergePaths(currentPath, entry.getKey());
Object value = entry.getValue();
boolean mayRemove = true;
boolean isMultiField = false;
if (value instanceof Map) {
Map<String, Object> map = (Map<String, Object>) value;
Map<String, Object> properties = (Map<String, Object>)map.get("properties");
if (properties != null) {
mayRemove = filterFields(newPath, properties, fieldPredicate);
} else {
Map<String, Object> subFields = (Map<String, Object>)map.get("fields");
if (subFields != null) {
isMultiField = true;
if (mayRemove = filterFields(newPath, subFields, fieldPredicate)) {
map.remove("fields");
}
}
}
} else {
throw new IllegalStateException("cannot filter mappings, found unknown element of type [" + value.getClass() + "]");
}
//only remove a field if it has no sub-fields left and it has to be excluded
if (fieldPredicate.test(newPath) == false) {
if (mayRemove) {
entryIterator.remove();
} else if (isMultiField) {
//multi fields that should be excluded but hold subfields that don't have to be excluded are converted to objects
Map<String, Object> map = (Map<String, Object>) value;
Map<String, Object> subFields = (Map<String, Object>)map.get("fields");
assert subFields.size() > 0;
map.put("properties", subFields);
map.remove("fields");
map.remove("type");
}
}
}
//return true if the ancestor may be removed, as it has no sub-fields left
return fields.size() == 0;
}
private static String mergePaths(String path, String field) {
if (path.length() == 0) {
return field;
}
return path + "." + field;
}
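The filter chain above bottoms out in the plugin hook named in the javadoc. A hedged sketch of a plugin that hides fields; only MapperPlugin#getFieldFilter and NOOP_FIELD_PREDICATE, both referenced in this change, are assumed:

    import java.util.function.Function;
    import java.util.function.Predicate;
    import org.elasticsearch.plugins.MapperPlugin;
    import org.elasticsearch.plugins.Plugin;

    public class FieldFilteringPlugin extends Plugin implements MapperPlugin {
        @Override
        public Function<String, Predicate<String>> getFieldFilter() {
            // hide any field under "private." except in public-* indices; returning
            // NOOP_FIELD_PREDICATE lets filterFields skip the mapping rewrite entirely
            return index -> index.startsWith("public-")
                    ? MapperPlugin.NOOP_FIELD_PREDICATE
                    : field -> field.startsWith("private.") == false;
        }
    }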
/**
* Returns all the concrete indices.
*/

View File

@ -38,6 +38,7 @@ import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
@ -54,7 +55,6 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
import static org.elasticsearch.action.support.ContextPreservingActionListener.wrapPreservingContext;
@ -164,13 +164,16 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
Settings.Builder settingsForOpenIndices = Settings.builder();
final Set<String> skippedSettings = new HashSet<>();
indexScopedSettings.validate(normalizedSettings, false); // don't validate dependencies here we check it below
// never allow to change the number of shards
indexScopedSettings.validate(normalizedSettings.filter(s -> Regex.isSimpleMatchPattern(s) == false /* don't validate wildcards */),
false); // don't validate dependencies here, we check it below; never allow changing the number of shards
for (String key : normalizedSettings.keySet()) {
Setting setting = indexScopedSettings.get(key);
assert setting != null; // we already validated the normalized settings
boolean isWildcard = setting == null && Regex.isSimpleMatchPattern(key);
assert setting != null // we already validated the normalized settings
|| (isWildcard && normalizedSettings.hasValue(key) == false)
: "unknown setting: " + key + " isWildcard: " + isWildcard + " hasValue: " + normalizedSettings.hasValue(key);
settingsForClosedIndices.copy(key, normalizedSettings);
if (setting.isDynamic()) {
if (isWildcard || setting.isDynamic()) {
settingsForOpenIndices.copy(key, normalizedSettings);
} else {
skippedSettings.add(key);
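One case this wildcard tolerance enables is resetting a whole family of settings in a single update; a hedged sketch with the client API assumed (putNull leaves the key without a value, matching the assertion above):

    client.admin().indices().prepareUpdateSettings("my-index")
            .setSettings(Settings.builder().putNull("archived.*").build())
            .get();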

View File

@ -133,8 +133,11 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
Objects.requireNonNull(state.get(), "please set initial state before starting");
addListener(localNodeMasterListeners);
threadPoolExecutor = EsExecutors.newSinglePrioritizing(CLUSTER_UPDATE_THREAD_NAME,
daemonThreadFactory(settings, CLUSTER_UPDATE_THREAD_NAME), threadPool.getThreadContext(), threadPool.scheduler());
threadPoolExecutor = EsExecutors.newSinglePrioritizing(
nodeName() + "/" + CLUSTER_UPDATE_THREAD_NAME,
daemonThreadFactory(settings, CLUSTER_UPDATE_THREAD_NAME),
threadPool.getThreadContext(),
threadPool.scheduler());
}
class UpdateTask extends SourcePrioritizedRunnable implements Function<ClusterState, ClusterState> {

View File

@ -104,8 +104,11 @@ public class MasterService extends AbstractLifecycleComponent {
protected synchronized void doStart() {
Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
Objects.requireNonNull(clusterStateSupplier, "please set a cluster state supplier before starting");
threadPoolExecutor = EsExecutors.newSinglePrioritizing(MASTER_UPDATE_THREAD_NAME,
daemonThreadFactory(settings, MASTER_UPDATE_THREAD_NAME), threadPool.getThreadContext(), threadPool.scheduler());
threadPoolExecutor = EsExecutors.newSinglePrioritizing(
nodeName() + "/" + MASTER_UPDATE_THREAD_NAME,
daemonThreadFactory(settings, MASTER_UPDATE_THREAD_NAME),
threadPool.getThreadContext(),
threadPool.scheduler());
taskBatcher = new Batcher(logger, threadPoolExecutor);
}

View File

@ -0,0 +1,89 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.bytes;
import org.apache.lucene.util.BytesRef;
import java.nio.ByteBuffer;
/**
* This is a {@link BytesReference} backed by a {@link ByteBuffer}. The byte buffer can either be a heap or
* direct byte buffer. The reference is composed of the space between the {@link ByteBuffer#position} and
* {@link ByteBuffer#limit} at construction time. If the position or limit of the underlying byte buffer is
* changed, those changes will not be reflected in this reference. However, modifying the limit or position
* of the underlying byte buffer is not recommended as those can be used during {@link ByteBuffer#get()}
* bounds checks. Use {@link ByteBuffer#duplicate()} at creation time if you plan on modifying the markers of
* the underlying byte buffer. Any changes to the underlying data in the byte buffer will be reflected.
*/
public class ByteBufferReference extends BytesReference {
private final ByteBuffer buffer;
private final int offset;
private final int length;
public ByteBufferReference(ByteBuffer buffer) {
this.buffer = buffer;
this.offset = buffer.position();
this.length = buffer.remaining();
}
@Override
public byte get(int index) {
return buffer.get(index + offset);
}
@Override
public int length() {
return length;
}
@Override
public BytesReference slice(int from, int length) {
if (from < 0 || (from + length) > this.length) {
throw new IndexOutOfBoundsException("can't slice a buffer with length [" + this.length + "], with slice parameters from ["
+ from + "], length [" + length + "]");
}
ByteBuffer newByteBuffer = buffer.duplicate();
newByteBuffer.position(offset + from);
newByteBuffer.limit(offset + from + length);
return new ByteBufferReference(newByteBuffer);
}
/**
* Returns a {@link BytesRef} wrapping these bytes. If this is a direct byte buffer, the bytes
* have to be copied.
*
* @return the bytes ref
*/
@Override
public BytesRef toBytesRef() {
if (buffer.hasArray()) {
return new BytesRef(buffer.array(), buffer.arrayOffset() + offset, length);
}
final byte[] copy = new byte[length];
// copy from this reference's absolute offset via a duplicate, so the original
// buffer's position and limit are left untouched
ByteBuffer dup = buffer.duplicate();
dup.position(offset);
dup.get(copy, 0, length);
return new BytesRef(copy);
}
@Override
public long ramBytesUsed() {
return buffer.capacity();
}
}
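A hedged usage sketch for the new reference type; per the class javadoc, duplicate the buffer up front if its position or limit will keep moving:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    ByteBuffer buffer = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
    BytesReference ref = new ByteBufferReference(buffer.duplicate());
    assert ref.length() == 5;
    assert ref.get(0) == 'h';
    BytesReference slice = ref.slice(1, 3);   // a view of "ell", no copying for heap buffers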

View File

@ -241,6 +241,11 @@ public enum GeoShapeType {
}
return coordinates;
}
@Override
public String wktName() {
return BBOX;
}
},
CIRCLE("circle") {
@Override
@ -273,11 +278,13 @@ public enum GeoShapeType {
private final String shapename;
private static Map<String, GeoShapeType> shapeTypeMap = new HashMap<>();
private static final String BBOX = "BBOX";
static {
for (GeoShapeType type : values()) {
shapeTypeMap.put(type.shapename, type);
}
shapeTypeMap.put(ENVELOPE.wktName().toLowerCase(Locale.ROOT), ENVELOPE);
}
GeoShapeType(String shapename) {
@ -300,6 +307,11 @@ public enum GeoShapeType {
ShapeBuilder.Orientation orientation, boolean coerce);
abstract CoordinateNode validate(CoordinateNode coordinates, boolean coerce);
/** wkt shape name */
public String wktName() {
return this.shapename;
}
public static List<Entry> getShapeWriteables() {
List<Entry> namedWriteables = new ArrayList<>();
namedWriteables.add(new Entry(ShapeBuilder.class, PointBuilder.TYPE.shapeName(), PointBuilder::new));
@ -313,4 +325,9 @@ public enum GeoShapeType {
namedWriteables.add(new Entry(ShapeBuilder.class, GeometryCollectionBuilder.TYPE.shapeName(), GeometryCollectionBuilder::new));
return namedWriteables;
}
@Override
public String toString() {
return this.shapename;
}
}

View File

@ -168,6 +168,11 @@ public class CircleBuilder extends ShapeBuilder<Circle, CircleBuilder> {
return TYPE;
}
@Override
public String toWKT() {
throw new UnsupportedOperationException("The WKT spec does not support CIRCLE geometry");
}
@Override
public int hashCode() {
return Objects.hash(center, radius, unit.ordinal());

View File

@ -20,6 +20,7 @@
package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.geo.GeoShapeType;
import org.elasticsearch.common.geo.parsers.GeoWKTParser;
import org.elasticsearch.common.geo.parsers.ShapeParser;
import org.locationtech.spatial4j.shape.Rectangle;
import com.vividsolutions.jts.geom.Coordinate;
@ -70,6 +71,28 @@ public class EnvelopeBuilder extends ShapeBuilder<Rectangle, EnvelopeBuilder> {
return this.bottomRight;
}
@Override
protected StringBuilder contentToWKT() {
StringBuilder sb = new StringBuilder();
sb.append(GeoWKTParser.LPAREN);
// minX, maxX, maxY, minY
sb.append(topLeft.x);
sb.append(GeoWKTParser.COMMA);
sb.append(GeoWKTParser.SPACE);
sb.append(bottomRight.x);
sb.append(GeoWKTParser.COMMA);
sb.append(GeoWKTParser.SPACE);
// TODO support Z??
sb.append(topLeft.y);
sb.append(GeoWKTParser.COMMA);
sb.append(GeoWKTParser.SPACE);
sb.append(bottomRight.y);
sb.append(GeoWKTParser.RPAREN);
return sb;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();

View File

@ -21,6 +21,7 @@ package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.geo.GeoShapeType;
import org.elasticsearch.common.geo.parsers.ShapeParser;
import org.elasticsearch.common.geo.parsers.GeoWKTParser;
import org.locationtech.spatial4j.shape.Shape;
import org.elasticsearch.ElasticsearchException;
@ -136,6 +137,23 @@ public class GeometryCollectionBuilder extends ShapeBuilder {
return builder.endObject();
}
@Override
protected StringBuilder contentToWKT() {
StringBuilder sb = new StringBuilder();
if (shapes.isEmpty()) {
sb.append(GeoWKTParser.EMPTY);
} else {
sb.append(GeoWKTParser.LPAREN);
sb.append(shapes.get(0).toWKT());
for (int i = 1; i < shapes.size(); ++i) {
sb.append(GeoWKTParser.COMMA);
sb.append(shapes.get(i).toWKT());
}
sb.append(GeoWKTParser.RPAREN);
}
return sb;
}
@Override
public GeoShapeType type() {
return TYPE;

View File

@ -20,8 +20,8 @@
package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.geo.GeoShapeType;
import org.elasticsearch.common.geo.parsers.GeoWKTParser;
import org.elasticsearch.common.geo.parsers.ShapeParser;
import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.LineString;
@ -82,6 +82,25 @@ public class MultiLineStringBuilder extends ShapeBuilder<JtsGeometry, MultiLineS
return TYPE;
}
@Override
protected StringBuilder contentToWKT() {
final StringBuilder sb = new StringBuilder();
if (lines.isEmpty()) {
sb.append(GeoWKTParser.EMPTY);
} else {
sb.append(GeoWKTParser.LPAREN);
if (lines.size() > 0) {
sb.append(ShapeBuilder.coordinateListToWKT(lines.get(0).coordinates));
}
for (int i = 1; i < lines.size(); ++i) {
sb.append(GeoWKTParser.COMMA);
sb.append(ShapeBuilder.coordinateListToWKT(lines.get(i).coordinates));
}
sb.append(GeoWKTParser.RPAREN);
}
return sb;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();

View File

@ -21,6 +21,7 @@ package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.geo.GeoShapeType;
import org.elasticsearch.common.geo.parsers.ShapeParser;
import org.elasticsearch.common.geo.parsers.GeoWKTParser;
import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate;
@ -101,6 +102,37 @@ public class MultiPolygonBuilder extends ShapeBuilder {
return polygons;
}
private static String polygonCoordinatesToWKT(PolygonBuilder polygon) {
StringBuilder sb = new StringBuilder();
sb.append(GeoWKTParser.LPAREN);
sb.append(ShapeBuilder.coordinateListToWKT(polygon.shell().coordinates));
for (LineStringBuilder hole : polygon.holes()) {
sb.append(GeoWKTParser.COMMA);
sb.append(ShapeBuilder.coordinateListToWKT(hole.coordinates));
}
sb.append(GeoWKTParser.RPAREN);
return sb.toString();
}
@Override
protected StringBuilder contentToWKT() {
final StringBuilder sb = new StringBuilder();
if (polygons.isEmpty()) {
sb.append(GeoWKTParser.EMPTY);
} else {
sb.append(GeoWKTParser.LPAREN);
if (polygons.size() > 0) {
sb.append(polygonCoordinatesToWKT(polygons.get(0)));
}
for (int i = 1; i < polygons.size(); ++i) {
sb.append(GeoWKTParser.COMMA);
sb.append(polygonCoordinatesToWKT(polygons.get(i)));
}
sb.append(GeoWKTParser.RPAREN);
}
return sb;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();

View File

@ -729,6 +729,19 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, PolygonBuilder> {
}
}
@Override
protected StringBuilder contentToWKT() {
StringBuilder sb = new StringBuilder();
sb.append('(');
sb.append(ShapeBuilder.coordinateListToWKT(shell.coordinates));
for (LineStringBuilder hole : holes) {
sb.append(", ");
sb.append(ShapeBuilder.coordinateListToWKT(hole.coordinates));
}
sb.append(')');
return sb;
}
@Override
public int hashCode() {
return Objects.hash(shell, holes, orientation);

View File

@ -27,6 +27,7 @@ import org.apache.logging.log4j.Logger;
import org.elasticsearch.Assertions;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.geo.GeoShapeType;
import org.elasticsearch.common.geo.parsers.GeoWKTParser;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -339,6 +340,47 @@ public abstract class ShapeBuilder<T extends Shape, E extends ShapeBuilder<T,E>>
}
}
protected StringBuilder contentToWKT() {
return coordinateListToWKT(this.coordinates);
}
public String toWKT() {
StringBuilder sb = new StringBuilder();
sb.append(type().wktName());
sb.append(GeoWKTParser.SPACE);
sb.append(contentToWKT());
return sb.toString();
}
protected static StringBuilder coordinateListToWKT(final List<Coordinate> coordinates) {
final StringBuilder sb = new StringBuilder();
if (coordinates.isEmpty()) {
sb.append(GeoWKTParser.EMPTY);
} else {
// walk through coordinates:
sb.append(GeoWKTParser.LPAREN);
sb.append(coordinateToWKT(coordinates.get(0)));
for (int i = 1; i < coordinates.size(); ++i) {
sb.append(GeoWKTParser.COMMA);
sb.append(GeoWKTParser.SPACE);
sb.append(coordinateToWKT(coordinates.get(i)));
}
sb.append(GeoWKTParser.RPAREN);
}
return sb;
}
private static String coordinateToWKT(final Coordinate coordinate) {
final StringBuilder sb = new StringBuilder();
sb.append(coordinate.x + GeoWKTParser.SPACE + coordinate.y);
if (Double.isNaN(coordinate.z) == false) {
sb.append(GeoWKTParser.SPACE + coordinate.z);
}
return sb.toString();
}
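These helpers fix the WKT coordinate syntax used by every builder above. As a quick illustration (a sketch, not part of the diff; it assumes a caller with access to the protected helper, e.g. in the same package): an empty list renders as EMPTY, otherwise comma-separated "x y [z]" tuples are wrapped in parentheses, and the z value is emitted only when it is not NaN.
import com.vividsolutions.jts.geom.Coordinate;
import java.util.Arrays;
import java.util.Collections;
// hypothetical call site with access to ShapeBuilder's protected helper
StringBuilder empty = ShapeBuilder.coordinateListToWKT(Collections.emptyList()); // "EMPTY"
StringBuilder line = ShapeBuilder.coordinateListToWKT(Arrays.asList(
    new Coordinate(30, 10), new Coordinate(40, 40, 5)));
// line.toString() -> "(30.0 10.0, 40.0 40.0 5.0)"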
protected static final IntersectionOrder INTERSECTION_ORDER = new IntersectionOrder();
private static final class IntersectionOrder implements Comparator<Edge> {

View File

@ -0,0 +1,321 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.geo.parsers;
import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.geo.GeoShapeType;
import org.elasticsearch.common.geo.builders.CoordinatesBuilder;
import org.elasticsearch.common.geo.builders.EnvelopeBuilder;
import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder;
import org.elasticsearch.common.geo.builders.LineStringBuilder;
import org.elasticsearch.common.geo.builders.MultiLineStringBuilder;
import org.elasticsearch.common.geo.builders.MultiPointBuilder;
import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
import org.elasticsearch.common.geo.builders.PointBuilder;
import org.elasticsearch.common.geo.builders.PolygonBuilder;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.io.StreamTokenizer;
import java.util.List;
/**
* Parses shape geometry represented in WKT format
*
* complies with OGC® document: 12-063r5 and ISO/IEC 13249-3:2016 standard
* located at http://docs.opengeospatial.org/is/12-063r5/12-063r5.html
*/
public class GeoWKTParser {
public static final String EMPTY = "EMPTY";
public static final String SPACE = Loggers.SPACE;
public static final String LPAREN = "(";
public static final String RPAREN = ")";
public static final String COMMA = ",";
private static final String NAN = "NaN";
private static final String NUMBER = "<NUMBER>";
private static final String EOF = "END-OF-STREAM";
private static final String EOL = "END-OF-LINE";
// no instance
private GeoWKTParser() {}
public static ShapeBuilder parse(XContentParser parser)
throws IOException, ElasticsearchParseException {
FastStringReader reader = new FastStringReader(parser.text());
try {
// set up the tokenizer; configured so that numbers are tokenized as words
StreamTokenizer tokenizer = new StreamTokenizer(reader);
tokenizer.resetSyntax();
tokenizer.wordChars('a', 'z');
tokenizer.wordChars('A', 'Z');
tokenizer.wordChars(128 + 32, 255);
tokenizer.wordChars('0', '9');
tokenizer.wordChars('-', '-');
tokenizer.wordChars('+', '+');
tokenizer.wordChars('.', '.');
tokenizer.whitespaceChars(0, ' ');
tokenizer.commentChar('#');
ShapeBuilder builder = parseGeometry(tokenizer);
checkEOF(tokenizer);
return builder;
} finally {
reader.close();
}
}
/** parse geometry from the stream tokenizer */
private static ShapeBuilder parseGeometry(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
final GeoShapeType type = GeoShapeType.forName(nextWord(stream));
switch (type) {
case POINT:
return parsePoint(stream);
case MULTIPOINT:
return parseMultiPoint(stream);
case LINESTRING:
return parseLine(stream);
case MULTILINESTRING:
return parseMultiLine(stream);
case POLYGON:
return parsePolygon(stream);
case MULTIPOLYGON:
return parseMultiPolygon(stream);
case ENVELOPE:
return parseBBox(stream);
case GEOMETRYCOLLECTION:
return parseGeometryCollection(stream);
default:
throw new IllegalArgumentException("Unknown geometry type: " + type);
}
}
private static EnvelopeBuilder parseBBox(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
if (nextEmptyOrOpen(stream).equals(EMPTY)) {
return null;
}
double minLon = nextNumber(stream);
nextComma(stream);
double maxLon = nextNumber(stream);
nextComma(stream);
double maxLat = nextNumber(stream);
nextComma(stream);
double minLat = nextNumber(stream);
nextCloser(stream);
return new EnvelopeBuilder(new Coordinate(minLon, maxLat), new Coordinate(maxLon, minLat));
}
private static PointBuilder parsePoint(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
if (nextEmptyOrOpen(stream).equals(EMPTY)) {
return null;
}
PointBuilder pt = new PointBuilder(nextNumber(stream), nextNumber(stream));
if (isNumberNext(stream) == true) {
nextNumber(stream);
}
nextCloser(stream);
return pt;
}
private static List<Coordinate> parseCoordinateList(StreamTokenizer stream)
throws IOException, ElasticsearchParseException {
CoordinatesBuilder coordinates = new CoordinatesBuilder();
boolean isOpenParen = false;
if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) {
coordinates.coordinate(parseCoordinate(stream));
}
if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) {
throw new ElasticsearchParseException("expected: " + RPAREN + " but found: " + tokenString(stream), stream.lineno());
}
while (nextCloserOrComma(stream).equals(COMMA)) {
isOpenParen = false;
if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) {
coordinates.coordinate(parseCoordinate(stream));
}
if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) {
throw new ElasticsearchParseException("expected: " + RPAREN + " but found: " + tokenString(stream), stream.lineno());
}
}
return coordinates.build();
}
private static Coordinate parseCoordinate(StreamTokenizer stream)
throws IOException, ElasticsearchParseException {
final double lon = nextNumber(stream);
final double lat = nextNumber(stream);
Double z = null;
if (isNumberNext(stream)) {
z = nextNumber(stream);
}
return z == null ? new Coordinate(lon, lat) : new Coordinate(lon, lat, z);
}
private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
String token = nextEmptyOrOpen(stream);
if (token.equals(EMPTY)) {
return null;
}
return new MultiPointBuilder(parseCoordinateList(stream));
}
private static LineStringBuilder parseLine(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
String token = nextEmptyOrOpen(stream);
if (token.equals(EMPTY)) {
return null;
}
return new LineStringBuilder(parseCoordinateList(stream));
}
private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
String token = nextEmptyOrOpen(stream);
if (token.equals(EMPTY)) {
return null;
}
MultiLineStringBuilder builder = new MultiLineStringBuilder();
builder.linestring(parseLine(stream));
while (nextCloserOrComma(stream).equals(COMMA)) {
builder.linestring(parseLine(stream));
}
return builder;
}
private static PolygonBuilder parsePolygon(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
if (nextEmptyOrOpen(stream).equals(EMPTY)) {
return null;
}
PolygonBuilder builder = new PolygonBuilder(parseLine(stream), ShapeBuilder.Orientation.RIGHT);
while (nextCloserOrComma(stream).equals(COMMA)) {
builder.hole(parseLine(stream));
}
return builder;
}
private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
if (nextEmptyOrOpen(stream).equals(EMPTY)) {
return null;
}
MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream));
while (nextCloserOrComma(stream).equals(COMMA)) {
builder.polygon(parsePolygon(stream));
}
return builder;
}
private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream)
throws IOException, ElasticsearchParseException {
if (nextEmptyOrOpen(stream).equals(EMPTY)) {
return null;
}
GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape(parseGeometry(stream));
while (nextCloserOrComma(stream).equals(COMMA)) {
builder.shape(parseGeometry(stream));
}
return builder;
}
/** next word in the stream */
private static String nextWord(StreamTokenizer stream) throws ElasticsearchParseException, IOException {
switch (stream.nextToken()) {
case StreamTokenizer.TT_WORD:
final String word = stream.sval;
return word.equalsIgnoreCase(EMPTY) ? EMPTY : word;
case '(': return LPAREN;
case ')': return RPAREN;
case ',': return COMMA;
}
throw new ElasticsearchParseException("expected word but found: " + tokenString(stream), stream.lineno());
}
private static double nextNumber(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
if (stream.nextToken() == StreamTokenizer.TT_WORD) {
if (stream.sval.equalsIgnoreCase(NAN)) {
return Double.NaN;
} else {
try {
return Double.parseDouble(stream.sval);
} catch (NumberFormatException e) {
throw new ElasticsearchParseException("invalid number found: " + stream.sval, stream.lineno());
}
}
}
throw new ElasticsearchParseException("expected number but found: " + tokenString(stream), stream.lineno());
}
private static String tokenString(StreamTokenizer stream) {
switch (stream.ttype) {
case StreamTokenizer.TT_WORD: return stream.sval;
case StreamTokenizer.TT_EOF: return EOF;
case StreamTokenizer.TT_EOL: return EOL;
case StreamTokenizer.TT_NUMBER: return NUMBER;
}
return "'" + (char) stream.ttype + "'";
}
private static boolean isNumberNext(StreamTokenizer stream) throws IOException {
final int type = stream.nextToken();
stream.pushBack();
return type == StreamTokenizer.TT_WORD;
}
private static String nextEmptyOrOpen(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
final String next = nextWord(stream);
if (next.equals(EMPTY) || next.equals(LPAREN)) {
return next;
}
throw new ElasticsearchParseException("expected " + EMPTY + " or " + LPAREN
+ " but found: " + tokenString(stream), stream.lineno());
}
private static String nextCloser(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
if (nextWord(stream).equals(RPAREN)) {
return RPAREN;
}
throw new ElasticsearchParseException("expected " + RPAREN + " but found: " + tokenString(stream), stream.lineno());
}
private static String nextComma(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
if (nextWord(stream).equals(COMMA) == true) {
return COMMA;
}
throw new ElasticsearchParseException("expected " + COMMA + " but found: " + tokenString(stream), stream.lineno());
}
private static String nextCloserOrComma(StreamTokenizer stream) throws IOException, ElasticsearchParseException {
String token = nextWord(stream);
if (token.equals(COMMA) || token.equals(RPAREN)) {
return token;
}
throw new ElasticsearchParseException("expected " + COMMA + " or " + RPAREN
+ " but found: " + tokenString(stream), stream.lineno());
}
/** checks that no more tokens are available in the stream */
private static void checkEOF(StreamTokenizer stream) throws ElasticsearchParseException, IOException {
if (stream.nextToken() != StreamTokenizer.TT_EOF) {
throw new ElasticsearchParseException("expected end of WKT string but found additional text: "
+ tokenString(stream), stream.lineno());
}
}
}
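To make the grammar concrete, here is a hedged sketch of inputs the parser accepts (the XContentParser must be positioned on the WKT string value; examples only, not an exhaustive list):
// ShapeBuilder builder = GeoWKTParser.parse(parser);
// "POINT (13.4 52.5)"                 -> PointBuilder
// "LINESTRING (0 0, 1 1)"             -> LineStringBuilder
// "POLYGON ((0 0, 10 0, 10 10, 0 0))" -> PolygonBuilder (first ring is the shell, further rings are holes)
// "ENVELOPE (0, 10, 10, 0)"           -> EnvelopeBuilder (argument order: minLon, maxLon, maxLat, minLat)
// "POINT EMPTY"                       -> null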

View File

@ -51,6 +51,8 @@ public interface ShapeParser {
return null;
} if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
return GeoJsonParser.parse(parser, shapeMapper);
} else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
return GeoWKTParser.parse(parser);
}
throw new ElasticsearchParseException("shape must be an object consisting of type and coordinates, or a WKT string");
}
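With this dispatch in place, a geo_shape value may be supplied either as a GeoJSON object or as a WKT string (sketch; the field name "location" is hypothetical):
// GeoJSON object form:  {"location": {"type": "point", "coordinates": [13.4, 52.5]}}
// new WKT string form:  {"location": "POINT (13.4 52.5)"}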

View File

@ -500,6 +500,16 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
return updateSettings(toApply, target, updates, type, false);
}
/**
* Returns <code>true</code> if the given key is a valid delete key
*/
private boolean isValidDelete(String key, boolean onlyDynamic) {
return isFinalSetting(key) == false && // it's not a final setting
(onlyDynamic && isDynamicSetting(key) // it's a dynamicSetting and we only do dynamic settings
|| get(key) == null && key.startsWith(ARCHIVED_SETTINGS_PREFIX) // the setting is not registered AND it's been archived
|| (onlyDynamic == false && get(key) != null)); // if it's not dynamic AND we have a key
}
/**
* Updates a target settings builder with new, updated or deleted settings from a given settings builder.
*
@ -519,21 +529,16 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
final Predicate<String> canUpdate = (key) -> (
isFinalSetting(key) == false && // it's not a final setting
((onlyDynamic == false && get(key) != null) || isDynamicSetting(key)));
final Predicate<String> canRemove = (key) ->(// we can delete if
isFinalSetting(key) == false && // it's not a final setting
(onlyDynamic && isDynamicSetting(key) // it's a dynamicSetting and we only do dynamic settings
|| get(key) == null && key.startsWith(ARCHIVED_SETTINGS_PREFIX) // the setting is not registered AND it's been archived
|| (onlyDynamic == false && get(key) != null))); // if it's not dynamic AND we have a key
for (String key : toApply.keySet()) {
boolean isNull = toApply.get(key) == null;
if (isNull && (canRemove.test(key) || key.endsWith("*"))) {
boolean isDelete = toApply.hasValue(key) == false;
if (isDelete && (isValidDelete(key, onlyDynamic) || key.endsWith("*"))) {
// this either accepts deletes that satisfy the isValidDelete test OR wildcard expressions (key ends with *)
// we don't validate if there is any dynamic setting with that prefix yet we could do in the future
toRemove.add(key);
// we don't set changed here it's set after we apply deletes below if something actually changed
} else if (get(key) == null) {
throw new IllegalArgumentException(type + " setting [" + key + "], not recognized");
} else if (isNull == false && canUpdate.test(key)) {
} else if (isDelete == false && canUpdate.test(key)) {
validate(key, toApply, false); // we might not have a full picture here due to a dependency validation
settingsBuilder.copy(key, toApply);
updates.copy(key, toApply);
@ -546,7 +551,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
}
}
}
changed |= applyDeletes(toRemove, target, canRemove);
changed |= applyDeletes(toRemove, target, k -> isValidDelete(k, onlyDynamic));
target.put(settingsBuilder.build());
return changed;
}
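Restating the extracted rule as a standalone predicate may help review it (a simplified sketch outside the real class; "registered" stands in for get(key) != null):
static boolean isValidDelete(boolean isFinal, boolean isDynamic, boolean registered,
                             boolean archived, boolean onlyDynamic) {
    return isFinal == false &&                     // final settings can never be deleted
        (onlyDynamic && isDynamic                  // a dynamic update may delete a dynamic setting
        || registered == false && archived         // archived, unregistered settings are always deletable
        || (onlyDynamic == false && registered));  // a full update may delete any registered setting
}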

View File

@ -85,6 +85,7 @@ import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.RemoteClusterAware;
@ -360,6 +361,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
SearchService.MAX_KEEPALIVE_SETTING,
MultiBucketConsumerService.MAX_BUCKET_SETTING,
SearchService.LOW_LEVEL_CANCELLATION_SETTING,
Node.WRITE_PORTS_FILE_SETTING,
Node.NODE_NAME_SETTING,

View File

@ -49,6 +49,7 @@ import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.lucene.codecs.CodecUtil;
@ -59,7 +60,6 @@ import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.bootstrap.BootstrapSettings;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.Randomness;
@ -75,6 +75,11 @@ import org.elasticsearch.common.Randomness;
*/
public class KeyStoreWrapper implements SecureSettings {
/**
* A regex for the valid characters that a setting name in the keystore may use.
*/
private static final Pattern ALLOWED_SETTING_NAME = Pattern.compile("[a-z0-9_\\-.]+");
public static final Setting<SecureString> SEED_SETTING = SecureSetting.secureString("keystore.seed", null);
/** Characters that may be used in the bootstrap seed setting added to all keystores. */
@ -383,6 +388,18 @@ public class KeyStoreWrapper implements SecureSettings {
return Base64.getDecoder().wrap(bytesStream);
}
/**
* Ensure the given setting name is allowed.
*
* @throws IllegalArgumentException if the setting name is not valid
*/
public static void validateSettingName(String setting) {
if (ALLOWED_SETTING_NAME.matcher(setting).matches() == false) {
throw new IllegalArgumentException("Setting name [" + setting + "] does not match the allowed setting name pattern ["
+ ALLOWED_SETTING_NAME.pattern() + "]");
}
}
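A quick sketch of the new check in action (setting names are hypothetical):
KeyStoreWrapper.validateSettingName("cloud.aws.access_key");  // passes: lowercase, digits, _-. only
KeyStoreWrapper.validateSettingName("Secure.Key");            // throws IllegalArgumentException (uppercase)
KeyStoreWrapper.validateSettingName("has space");             // throws IllegalArgumentException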
/**
* Set a string setting.
*
@ -390,6 +407,7 @@ public class KeyStoreWrapper implements SecureSettings {
*/
void setString(String setting, char[] value) throws GeneralSecurityException {
assert isLoaded();
validateSettingName(setting);
if (ASCII_ENCODER.canEncode(CharBuffer.wrap(value)) == false) {
throw new IllegalArgumentException("Value must be ascii");
}
@ -401,6 +419,7 @@ public class KeyStoreWrapper implements SecureSettings {
/** Set a file setting. */
void setFile(String setting, byte[] bytes) throws GeneralSecurityException {
assert isLoaded();
validateSettingName(setting);
bytes = Base64.getEncoder().encode(bytes);
char[] chars = new char[bytes.length];
for (int i = 0; i < chars.length; ++i) {

View File

@ -46,6 +46,7 @@ public abstract class SecureSetting<T> extends Setting<T> {
private SecureSetting(String key, Property... properties) {
super(key, (String)null, null, ArrayUtils.concat(properties, FIXED_PROPERTIES, Property.class));
assert assertAllowedProperties(properties);
KeyStoreWrapper.validateSettingName(key);
}
private boolean assertAllowedProperties(Setting.Property... properties) {

View File

@ -306,6 +306,13 @@ public final class Settings implements ToXContentFragment {
}
}
/**
* Returns <code>true</code> iff the given key has a value in this settings object
*/
public boolean hasValue(String key) {
return settings.get(key) != null;
}
/**
* We have to lazy initialize the deprecation logger as otherwise a static logger here would be constructed before logging is configured
* leading to a runtime failure (see {@link LogConfigurator#checkErrorListener()} ). The premature construction would come from any
@ -617,7 +624,7 @@ public final class Settings implements ToXContentFragment {
}
/**
* Parsers the generated xconten from {@link Settings#toXContent(XContentBuilder, Params)} into a new Settings object.
* Parses the generated xcontent from {@link Settings#toXContent(XContentBuilder, Params)} into a new Settings object.
* Note this method requires the parser to either be positioned on a null token or on
* {@link org.elasticsearch.common.xcontent.XContentParser.Token#START_OBJECT}.
*/
@ -1229,8 +1236,9 @@ public final class Settings implements ToXContentFragment {
Iterator<Map.Entry<String, Object>> iterator = map.entrySet().iterator();
while(iterator.hasNext()) {
Map.Entry<String, Object> entry = iterator.next();
if (entry.getKey().startsWith(prefix) == false) {
replacements.put(prefix + entry.getKey(), entry.getValue());
String key = entry.getKey();
if (key.startsWith(prefix) == false && key.endsWith("*") == false) {
replacements.put(prefix + key, entry.getValue());
iterator.remove();
}
}
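An illustration of the adjusted loop with prefix "index." (hypothetical input map): keys already carrying the prefix stay as they are, and wildcard keys are now skipped rather than prefixed:
// before: {"refresh_interval" -> "1s", "index.codec" -> "best_compression", "alias.*" -> "x"}
// after:  {"index.refresh_interval" -> "1s", "index.codec" -> "best_compression", "alias.*" -> "x"}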

View File

@ -246,13 +246,16 @@ public class EsExecutors {
* waiting if necessary for space to become available.
*/
static class ForceQueuePolicy implements XRejectedExecutionHandler {
@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
try {
// force queue policy should only be used with a scaling queue
assert executor.getQueue() instanceof ExecutorScalingQueue;
executor.getQueue().put(r);
} catch (InterruptedException e) {
//should never happen since we never wait
throw new EsRejectedExecutionException(e);
} catch (final InterruptedException e) {
// a scaling queue never blocks so a put to it can never be interrupted
throw new AssertionError(e);
}
}
@ -260,6 +263,7 @@ public class EsExecutors {
public long rejected() {
return 0;
}
}
}

View File

@ -27,29 +27,20 @@ import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
public class EsRejectedExecutionException extends ElasticsearchException {
private final boolean isExecutorShutdown;
public EsRejectedExecutionException(String message, boolean isExecutorShutdown, Object... args) {
super(message, args);
public EsRejectedExecutionException(String message, boolean isExecutorShutdown) {
super(message);
this.isExecutorShutdown = isExecutorShutdown;
}
public EsRejectedExecutionException(String message, Object... args) {
this(message, false, args);
}
public EsRejectedExecutionException(String message, boolean isExecutorShutdown) {
this(message, isExecutorShutdown, new Object[0]);
public EsRejectedExecutionException(String message) {
this(message, false);
}
public EsRejectedExecutionException() {
super((String)null);
this.isExecutorShutdown = false;
}
public EsRejectedExecutionException(Throwable e) {
super(null, e);
this.isExecutorShutdown = false;
this(null, false);
}
@Override
@ -79,4 +70,5 @@ public class EsRejectedExecutionException extends ElasticsearchException {
public boolean isExecutorShutdown() {
return isExecutorShutdown;
}
}

View File

@ -37,7 +37,11 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
/**
* Name used in error reporting.
*/
protected final String name;
private final String name;
final String getName() {
return name;
}
EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) {
@ -138,15 +142,16 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
}
@Override
public String toString() {
public final String toString() {
StringBuilder b = new StringBuilder();
b.append(getClass().getSimpleName()).append('[');
b.append(name).append(", ");
b.append("name = ").append(name).append(", ");
if (getQueue() instanceof SizeBlockingQueue) {
@SuppressWarnings("rawtypes")
SizeBlockingQueue queue = (SizeBlockingQueue) getQueue();
b.append("queue capacity = ").append(queue.capacity()).append(", ");
}
appendThreadPoolExecutorDetails(b);
/*
* ThreadPoolExecutor has some nice information in its toString but we
* can't get at it easily without just getting the toString.
@ -155,6 +160,16 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
return b.toString();
}
/**
* Append details about this thread pool to the specified {@link StringBuilder}. All details should be appended as key/value pairs in
* the form "%s = %s, "
*
* @param sb the {@link StringBuilder} to append to
*/
protected void appendThreadPoolExecutorDetails(final StringBuilder sb) {
}
protected Runnable wrapRunnable(Runnable command) {
return contextHolder.preserveContext(command);
}

View File

@ -22,21 +22,16 @@ package org.elasticsearch.common.util.concurrent;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.common.ExponentiallyWeightedMovingAverage;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ResizableBlockingQueue;
import java.util.Locale;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Stream;
/**
* An extension to thread pool executor, which automatically adjusts the queue size of the
@ -80,8 +75,8 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
this.maxQueueSize = maxQueueSize;
this.targetedResponseTimeNanos = targetedResponseTime.getNanos();
this.executionEWMA = new ExponentiallyWeightedMovingAverage(EWMA_ALPHA, 0);
logger.debug("thread pool [{}] will adjust queue by [{}] when determining automatic queue size",
name, QUEUE_ADJUSTMENT_AMOUNT);
logger.debug(
"thread pool [{}] will adjust queue by [{}] when determining automatic queue size", getName(), QUEUE_ADJUSTMENT_AMOUNT);
}
@Override
@ -180,7 +175,7 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
final long avgTaskTime = totalNanos / tasksPerFrame;
logger.debug("[{}]: there were [{}] tasks in [{}], avg task time [{}], EWMA task execution [{}], " +
"[{} tasks/s], optimal queue is [{}], current capacity [{}]",
name,
getName(),
tasksPerFrame,
TimeValue.timeValueNanos(totalRuntime),
TimeValue.timeValueNanos(avgTaskTime),
@ -196,7 +191,7 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
final int newCapacity =
workQueue.adjustCapacity(desiredQueueSize, QUEUE_ADJUSTMENT_AMOUNT, minQueueSize, maxQueueSize);
if (oldCapacity != newCapacity && logger.isDebugEnabled()) {
logger.debug("adjusted [{}] queue size by [{}], old capacity: [{}], new capacity: [{}]", name,
logger.debug("adjusted [{}] queue size by [{}], old capacity: [{}], new capacity: [{}]", getName(),
newCapacity > oldCapacity ? QUEUE_ADJUSTMENT_AMOUNT : -QUEUE_ADJUSTMENT_AMOUNT,
oldCapacity, newCapacity);
}
@ -205,7 +200,7 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
logger.warn((Supplier<?>) () -> new ParameterizedMessage(
"failed to calculate optimal queue size for [{}] thread pool, " +
"total frame time [{}ns], tasks [{}], task execution time [{}ns]",
name, totalRuntime, tasksPerFrame, totalNanos),
getName(), totalRuntime, tasksPerFrame, totalNanos),
e);
} finally {
// Finally, decrement the task count and time back to their starting values. We
@ -224,7 +219,8 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
// - Adjustment happens and we decrement the tasks by 10, taskCount is now 15
// - Since taskCount will now be incremented forever, it will never be 10 again,
// so there will be no further adjustments
logger.debug("[{}]: too many incoming tasks while queue size adjustment occurs, resetting measurements to 0", name);
logger.debug(
"[{}]: too many incoming tasks while queue size adjustment occurs, resetting measurements to 0", getName());
totalTaskNanos.getAndSet(1);
taskCount.getAndSet(0);
startNs = System.nanoTime();
@ -237,26 +233,13 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
}
@Override
public String toString() {
StringBuilder b = new StringBuilder();
b.append(getClass().getSimpleName()).append('[');
b.append(name).append(", ");
@SuppressWarnings("rawtypes")
ResizableBlockingQueue queue = (ResizableBlockingQueue) getQueue();
b.append("queue capacity = ").append(getCurrentCapacity()).append(", ");
b.append("min queue capacity = ").append(minQueueSize).append(", ");
b.append("max queue capacity = ").append(maxQueueSize).append(", ");
b.append("frame size = ").append(tasksPerFrame).append(", ");
b.append("targeted response rate = ").append(TimeValue.timeValueNanos(targetedResponseTimeNanos)).append(", ");
b.append("task execution EWMA = ").append(TimeValue.timeValueNanos((long)executionEWMA.getAverage())).append(", ");
b.append("adjustment amount = ").append(QUEUE_ADJUSTMENT_AMOUNT).append(", ");
/*
* ThreadPoolExecutor has some nice information in its toString but we
* can't get at it easily without just getting the toString.
*/
b.append(super.toString()).append(']');
return b.toString();
protected void appendThreadPoolExecutorDetails(StringBuilder sb) {
sb.append("min queue capacity = ").append(minQueueSize).append(", ");
sb.append("max queue capacity = ").append(maxQueueSize).append(", ");
sb.append("frame size = ").append(tasksPerFrame).append(", ");
sb.append("targeted response rate = ").append(TimeValue.timeValueNanos(targetedResponseTimeNanos)).append(", ");
sb.append("task execution EWMA = ").append(TimeValue.timeValueNanos((long) executionEWMA.getAverage())).append(", ");
sb.append("adjustment amount = ").append(QUEUE_ADJUSTMENT_AMOUNT).append(", ");
}
}

View File

@ -74,7 +74,6 @@ public final class ThreadContext implements Closeable, Writeable {
private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct();
private final Map<String, String> defaultHeader;
private final ContextThreadLocal threadLocal;
private boolean isSystemContext;
/**
* Creates a new ThreadContext instance
@ -121,7 +120,6 @@ public final class ThreadContext implements Closeable, Writeable {
return () -> threadLocal.set(context);
}
/**
* Just like {@link #stashContext()} but no default context is set.
* @param preserveResponseHeaders if set to <code>true</code> the response headers of the restore thread will be preserved.

View File

@ -167,8 +167,9 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]");
unicastZenPingExecutorService = EsExecutors.newScaling(
"unicast_connect",
0, concurrentConnects,
nodeName() + "/" + "unicast_connect",
0,
concurrentConnects,
60,
TimeUnit.SECONDS,
threadFactory,

View File

@ -69,7 +69,9 @@ public final class EngineConfig {
private final QueryCache queryCache;
private final QueryCachingPolicy queryCachingPolicy;
@Nullable
private final List<ReferenceManager.RefreshListener> refreshListeners;
private final List<ReferenceManager.RefreshListener> externalRefreshListener;
@Nullable
private final List<ReferenceManager.RefreshListener> internalRefreshListener;
@Nullable
private final Sort indexSort;
private final boolean forceNewHistoryUUID;
@ -120,7 +122,8 @@ public final class EngineConfig {
Similarity similarity, CodecService codecService, Engine.EventListener eventListener,
QueryCache queryCache, QueryCachingPolicy queryCachingPolicy,
boolean forceNewHistoryUUID, TranslogConfig translogConfig, TimeValue flushMergesAfter,
List<ReferenceManager.RefreshListener> refreshListeners, Sort indexSort,
List<ReferenceManager.RefreshListener> externalRefreshListener,
List<ReferenceManager.RefreshListener> internalRefreshListener, Sort indexSort,
TranslogRecoveryRunner translogRecoveryRunner, CircuitBreakerService circuitBreakerService) {
if (openMode == null) {
throw new IllegalArgumentException("openMode must not be null");
@ -147,7 +150,8 @@ public final class EngineConfig {
this.flushMergesAfter = flushMergesAfter;
this.openMode = openMode;
this.forceNewHistoryUUID = forceNewHistoryUUID;
this.refreshListeners = refreshListeners;
this.externalRefreshListener = externalRefreshListener;
this.internalRefreshListener = internalRefreshListener;
this.indexSort = indexSort;
this.translogRecoveryRunner = translogRecoveryRunner;
this.circuitBreakerService = circuitBreakerService;
@ -343,12 +347,18 @@ public final class EngineConfig {
}
/**
* The refresh listeners to add to Lucene
* The refresh listeners to add to Lucene for externally visible refreshes
*/
public List<ReferenceManager.RefreshListener> getRefreshListeners() {
return refreshListeners;
public List<ReferenceManager.RefreshListener> getExternalRefreshListener() {
return externalRefreshListener;
}
/**
* The refresh listeners to add to Lucene for internally visible refreshes. These listeners will also be invoked on external refreshes
*/
public List<ReferenceManager.RefreshListener> getInternalRefreshListener() { return internalRefreshListener; }
/**
* returns true if the engine is allowed to optimize indexing operations with an auto-generated ID
*/

View File

@ -232,9 +232,12 @@ public class InternalEngine extends Engine {
assert pendingTranslogRecovery.get() == false : "translog recovery can't be pending before we set it";
// don't allow commits until we are done with recovering
pendingTranslogRecovery.set(openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG);
for (ReferenceManager.RefreshListener listener: engineConfig.getRefreshListeners()) {
for (ReferenceManager.RefreshListener listener: engineConfig.getExternalRefreshListener()) {
this.externalSearcherManager.addListener(listener);
}
for (ReferenceManager.RefreshListener listener: engineConfig.getInternalRefreshListener()) {
this.internalSearcherManager.addListener(listener);
}
success = true;
} finally {
if (success == false) {
@ -426,11 +429,6 @@ public class InternalEngine extends Engine {
} else if (translog.isCurrent(translogGeneration) == false) {
commitIndexWriter(indexWriter, translog, lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID));
refreshLastCommittedSegmentInfos();
} else if (lastCommittedSegmentInfos.getUserData().containsKey(HISTORY_UUID_KEY) == false) {
assert historyUUID != null;
// put the history uuid into the index
commitIndexWriter(indexWriter, translog, lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID));
refreshLastCommittedSegmentInfos();
}
// clean up what's not needed
translog.trimUnreferencedReaders();

View File

@ -369,7 +369,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
public void setStrategyName(String strategyName) {
checkIfFrozen();
this.strategyName = strategyName;
if (this.strategyName.equals(SpatialStrategy.TERM)) {
if (this.strategyName.equals(SpatialStrategy.TERM.getStrategyName())) {
this.pointsOnly = true;
}
}

View File

@ -105,6 +105,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT,
Property.Dynamic, Property.IndexScope, Property.Deprecated);
//TODO this needs to be cleaned up: _timestamp and _ttl are not supported anymore, _field_names, _seq_no, _version and _source are
//also missing, not sure if on purpose. See IndicesModule#getMetadataMappers
private static ObjectHashSet<String> META_FIELDS = ObjectHashSet.from(
"_uid", "_id", "_type", "_parent", "_routing", "_index",
"_size", "_timestamp", "_ttl"

View File

@ -39,10 +39,13 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardClosedException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
/**
* Background global checkpoint sync action initiated when a shard goes inactive. This is needed because while we send the global checkpoint
* on every replication operation, after the last operation completes the global checkpoint could advance but without a follow-up operation
@ -116,16 +119,24 @@ public class GlobalCheckpointSyncAction extends TransportReplicationAction<
@Override
protected PrimaryResult<Request, ReplicationResponse> shardOperationOnPrimary(
final Request request, final IndexShard indexShard) throws Exception {
indexShard.getTranslog().sync();
maybeSyncTranslog(indexShard);
return new PrimaryResult<>(request, new ReplicationResponse());
}
@Override
protected ReplicaResult shardOperationOnReplica(final Request request, final IndexShard indexShard) throws Exception {
indexShard.getTranslog().sync();
maybeSyncTranslog(indexShard);
return new ReplicaResult();
}
private void maybeSyncTranslog(final IndexShard indexShard) throws IOException {
final Translog translog = indexShard.getTranslog();
if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST &&
translog.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
translog.sync();
}
}
public static final class Request extends ReplicationRequest<Request> {
private Request() {

View File

@ -48,7 +48,6 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
@ -66,7 +65,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.AsyncIOProcessor;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexModule;
@ -416,13 +414,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
logger.debug("failed to refresh due to move to cluster wide started", e);
}
if (newRouting.primary()) {
final DiscoveryNode recoverySourceNode = recoveryState.getSourceNode();
if (currentRouting.isRelocationTarget() == false || recoverySourceNode.getVersion().before(Version.V_6_0_0_alpha1)) {
if (newRouting.primary() && currentRouting.isRelocationTarget() == false) {
// there was no primary context hand-off in < 6.0.0, need to manually activate the shard
getEngine().seqNoService().activatePrimaryMode(getEngine().seqNoService().getLocalCheckpoint());
}
}
changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
} else if (state == IndexShardState.RELOCATED &&
@ -485,15 +480,18 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* subsequently fails before the primary/replica re-sync completes successfully and we are now being
* promoted, the local checkpoint tracker here could be left in a state where it would re-issue sequence
* numbers. To ensure that this is not the case, we restore the state of the local checkpoint tracker by
* replaying the translog and marking any operations there as completed. Rolling the translog generation is
* not strictly needed here (as we will never have collisions between sequence numbers in a translog
* generation in a new primary as it takes the last known sequence number as a starting point), but it
* simplifies reasoning about the relationship between primary terms and translog generations.
* replaying the translog and marking any operations there as completed.
*/
getEngine().rollTranslogGeneration();
getEngine().restoreLocalCheckpointFromTranslog();
getEngine().fillSeqNoGaps(newPrimaryTerm);
getEngine().seqNoService().updateLocalCheckpointForShard(currentRouting.allocationId().getId(),
final Engine engine = getEngine();
engine.restoreLocalCheckpointFromTranslog();
/* Rolling the translog generation is not strictly needed here (as we will never have collisions between
* sequence numbers in a translog generation in a new primary as it takes the last known sequence number
* as a starting point), but it simplifies reasoning about the relationship between primary terms and
* translog generations.
*/
engine.rollTranslogGeneration();
engine.fillSeqNoGaps(newPrimaryTerm);
engine.seqNoService().updateLocalCheckpointForShard(currentRouting.allocationId().getId(),
getEngine().seqNoService().getLocalCheckpoint());
primaryReplicaSyncer.accept(this, new ActionListener<ResyncTask>() {
@Override
@ -1337,6 +1335,17 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
active.set(true);
newEngine.recoverFromTranslog();
}
assertSequenceNumbersInCommit();
}
private boolean assertSequenceNumbersInCommit() throws IOException {
final Map<String, String> userData = SegmentInfos.readLatestCommit(store.directory()).getUserData();
assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contain a local checkpoint";
assert userData.containsKey(SequenceNumbers.MAX_SEQ_NO) : "commit point doesn't contain a maximum sequence number";
assert userData.containsKey(Engine.HISTORY_UUID_KEY) : "commit point doesn't contain a history uuid";
assert userData.get(Engine.HISTORY_UUID_KEY).equals(getHistoryUUID()) : "commit point history uuid ["
+ userData.get(Engine.HISTORY_UUID_KEY) + "] is different than engine [" + getHistoryUUID() + "]";
return true;
}
private boolean assertMaxUnsafeAutoIdInCommit() throws IOException {
@ -2185,8 +2194,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener,
indexCache.query(), cachingPolicy, forceNewHistoryUUID, translogConfig,
IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()),
Arrays.asList(refreshListeners, new RefreshMetricUpdater(refreshMetric)), indexSort,
this::runTranslogRecovery, circuitBreakerService);
Collections.singletonList(refreshListeners),
Collections.singletonList(new RefreshMetricUpdater(refreshMetric)),
indexSort, this::runTranslogRecovery, circuitBreakerService);
}
/**

View File

@ -217,7 +217,13 @@ public class TermVectorsService {
if (perFieldAnalyzer != null && perFieldAnalyzer.containsKey(field)) {
analyzer = mapperService.getIndexAnalyzers().get(perFieldAnalyzer.get(field).toString());
} else {
analyzer = mapperService.fullName(field).indexAnalyzer();
MappedFieldType fieldType = mapperService.fullName(field);
if (fieldType instanceof KeywordFieldMapper.KeywordFieldType) {
KeywordFieldMapper.KeywordFieldType keywordFieldType = (KeywordFieldMapper.KeywordFieldType) fieldType;
analyzer = keywordFieldType.normalizer() == null ? keywordFieldType.indexAnalyzer() : keywordFieldType.normalizer();
} else {
analyzer = fieldType.indexAnalyzer();
}
}
if (analyzer == null) {
analyzer = mapperService.getIndexAnalyzers().getDefaultIndexAnalyzer();

View File

@ -21,6 +21,7 @@ package org.elasticsearch.index.translog;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.RamUsageEstimator;
/**
* A {@link CountedBitSet} wraps a {@link FixedBitSet} but automatically releases the internal bitset
@ -28,11 +29,14 @@ import org.apache.lucene.util.FixedBitSet;
* from translog as these numbers are likely to form contiguous ranges (eg. filling all bits).
*/
final class CountedBitSet extends BitSet {
static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(CountedBitSet.class);
private short onBits; // Number of bits are set.
private FixedBitSet bitset;
CountedBitSet(short numBits) {
assert numBits > 0;
if (numBits <= 0) {
throw new IllegalArgumentException("Number of bits must be positive. Given [" + numBits + "]");
}
this.onBits = 0;
this.bitset = new FixedBitSet(numBits);
}
@ -41,7 +45,6 @@ final class CountedBitSet extends BitSet {
public boolean get(int index) {
assert 0 <= index && index < this.length();
assert bitset == null || onBits < bitset.length() : "Bitset should be released when all bits are set";
return bitset == null ? true : bitset.get(index);
}
@ -52,7 +55,7 @@ final class CountedBitSet extends BitSet {
// Ignore set when bitset is full.
if (bitset != null) {
boolean wasOn = bitset.getAndSet(index);
final boolean wasOn = bitset.getAndSet(index);
if (wasOn == false) {
onBits++;
// Once all bits are set, we can simply just return YES for all indexes.
@ -66,12 +69,12 @@ final class CountedBitSet extends BitSet {
@Override
public void clear(int startIndex, int endIndex) {
throw new UnsupportedOperationException("Not implemented yet");
throw new UnsupportedOperationException();
}
@Override
public void clear(int index) {
throw new UnsupportedOperationException("Not implemented yet");
throw new UnsupportedOperationException();
}
@Override
@ -86,20 +89,19 @@ final class CountedBitSet extends BitSet {
@Override
public int prevSetBit(int index) {
throw new UnsupportedOperationException("Not implemented yet");
throw new UnsupportedOperationException();
}
@Override
public int nextSetBit(int index) {
throw new UnsupportedOperationException("Not implemented yet");
throw new UnsupportedOperationException();
}
@Override
public long ramBytesUsed() {
throw new UnsupportedOperationException("Not implemented yet");
return BASE_RAM_BYTES_USED + (bitset == null ? 0 : bitset.ramBytesUsed());
}
// Exposed for testing
boolean isInternalBitsetReleased() {
return bitset == null;
}
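A brief sketch of the release-when-full behavior described in the class comment (the class is package-private, so this assumes a caller in the same package):
CountedBitSet bits = new CountedBitSet((short) 2);
bits.set(0);
boolean released = bits.isInternalBitsetReleased(); // false: one bit is still clear
bits.set(1);
released = bits.isInternalBitsetReleased();         // true: the backing FixedBitSet was freed
boolean stillSet = bits.get(0);                     // true, answered without the backing bitset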

View File

@ -24,7 +24,6 @@ import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition;
import org.elasticsearch.action.resync.TransportResyncReplicationAction;
import org.elasticsearch.index.shard.PrimaryReplicaSyncer;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
@ -33,12 +32,12 @@ import org.elasticsearch.index.mapper.BooleanFieldMapper;
import org.elasticsearch.index.mapper.CompletionFieldMapper;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
import org.elasticsearch.index.mapper.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.IndexFieldMapper;
import org.elasticsearch.index.mapper.IpFieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.NumberFieldMapper;
@ -52,6 +51,7 @@ import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper;
import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
import org.elasticsearch.index.shard.PrimaryReplicaSyncer;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.mapper.MapperRegistry;
@ -64,6 +64,9 @@ import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Predicate;
/**
* Configures classes and services that are shared by indices on each node.
@ -73,7 +76,8 @@ public class IndicesModule extends AbstractModule {
private final MapperRegistry mapperRegistry;
public IndicesModule(List<MapperPlugin> mapperPlugins) {
this.mapperRegistry = new MapperRegistry(getMappers(mapperPlugins), getMetadataMappers(mapperPlugins));
this.mapperRegistry = new MapperRegistry(getMappers(mapperPlugins), getMetadataMappers(mapperPlugins),
getFieldFilter(mapperPlugins));
registerBuiltinWritables();
}
@ -118,23 +122,42 @@ public class IndicesModule extends AbstractModule {
return Collections.unmodifiableMap(mappers);
}
private Map<String, MetadataFieldMapper.TypeParser> getMetadataMappers(List<MapperPlugin> mapperPlugins) {
private static final Map<String, MetadataFieldMapper.TypeParser> builtInMetadataMappers = initBuiltInMetadataMappers();
private static Map<String, MetadataFieldMapper.TypeParser> initBuiltInMetadataMappers() {
Map<String, MetadataFieldMapper.TypeParser> builtInMetadataMappers;
// Use a LinkedHashMap for metadataMappers because iteration order matters
builtInMetadataMappers = new LinkedHashMap<>();
// UID first so it will be the first stored field to load (so will benefit from "fields: []" early termination)
builtInMetadataMappers.put(UidFieldMapper.NAME, new UidFieldMapper.TypeParser());
builtInMetadataMappers.put(IdFieldMapper.NAME, new IdFieldMapper.TypeParser());
builtInMetadataMappers.put(RoutingFieldMapper.NAME, new RoutingFieldMapper.TypeParser());
builtInMetadataMappers.put(IndexFieldMapper.NAME, new IndexFieldMapper.TypeParser());
builtInMetadataMappers.put(SourceFieldMapper.NAME, new SourceFieldMapper.TypeParser());
builtInMetadataMappers.put(TypeFieldMapper.NAME, new TypeFieldMapper.TypeParser());
builtInMetadataMappers.put(VersionFieldMapper.NAME, new VersionFieldMapper.TypeParser());
builtInMetadataMappers.put(ParentFieldMapper.NAME, new ParentFieldMapper.TypeParser());
builtInMetadataMappers.put(SeqNoFieldMapper.NAME, new SeqNoFieldMapper.TypeParser());
//_field_names must be added last so that it has a chance to see all the other mappers
builtInMetadataMappers.put(FieldNamesFieldMapper.NAME, new FieldNamesFieldMapper.TypeParser());
return Collections.unmodifiableMap(builtInMetadataMappers);
}
private static Map<String, MetadataFieldMapper.TypeParser> getMetadataMappers(List<MapperPlugin> mapperPlugins) {
Map<String, MetadataFieldMapper.TypeParser> metadataMappers = new LinkedHashMap<>();
// builtin metadata mappers
// UID first so it will be the first stored field to load (so will benefit from "fields: []" early termination)
metadataMappers.put(UidFieldMapper.NAME, new UidFieldMapper.TypeParser());
metadataMappers.put(IdFieldMapper.NAME, new IdFieldMapper.TypeParser());
metadataMappers.put(RoutingFieldMapper.NAME, new RoutingFieldMapper.TypeParser());
metadataMappers.put(IndexFieldMapper.NAME, new IndexFieldMapper.TypeParser());
metadataMappers.put(SourceFieldMapper.NAME, new SourceFieldMapper.TypeParser());
metadataMappers.put(TypeFieldMapper.NAME, new TypeFieldMapper.TypeParser());
metadataMappers.put(VersionFieldMapper.NAME, new VersionFieldMapper.TypeParser());
metadataMappers.put(ParentFieldMapper.NAME, new ParentFieldMapper.TypeParser());
metadataMappers.put(SeqNoFieldMapper.NAME, new SeqNoFieldMapper.TypeParser());
// _field_names is not registered here, see below
int i = 0;
Map.Entry<String, MetadataFieldMapper.TypeParser> fieldNamesEntry = null;
for (Map.Entry<String, MetadataFieldMapper.TypeParser> entry : builtInMetadataMappers.entrySet()) {
if (i < builtInMetadataMappers.size() - 1) {
metadataMappers.put(entry.getKey(), entry.getValue());
} else {
assert entry.getKey().equals(FieldNamesFieldMapper.NAME) : "_field_names must be the last registered mapper, order counts";
fieldNamesEntry = entry;
}
i++;
}
assert fieldNamesEntry != null;
for (MapperPlugin mapperPlugin : mapperPlugins) {
for (Map.Entry<String, MetadataFieldMapper.TypeParser> entry : mapperPlugin.getMetadataMappers().entrySet()) {
@ -147,11 +170,49 @@ public class IndicesModule extends AbstractModule {
}
}
// we register _field_names here so that it has a chance to see all other mappers, including from plugins
metadataMappers.put(FieldNamesFieldMapper.NAME, new FieldNamesFieldMapper.TypeParser());
// we register _field_names here so that it has a chance to see all the other mappers, including from plugins
metadataMappers.put(fieldNamesEntry.getKey(), fieldNamesEntry.getValue());
return Collections.unmodifiableMap(metadataMappers);
}
/**
* Returns a set containing all of the builtin metadata fields
*/
public static Set<String> getBuiltInMetaDataFields() {
return builtInMetadataMappers.keySet();
}
private static Function<String, Predicate<String>> getFieldFilter(List<MapperPlugin> mapperPlugins) {
Function<String, Predicate<String>> fieldFilter = MapperPlugin.NOOP_FIELD_FILTER;
for (MapperPlugin mapperPlugin : mapperPlugins) {
fieldFilter = and(fieldFilter, mapperPlugin.getFieldFilter());
}
return fieldFilter;
}
private static Function<String, Predicate<String>> and(Function<String, Predicate<String>> first,
Function<String, Predicate<String>> second) {
//the purpose of this method is to avoid chaining no-op field predicates, so that we can easily detect when no plugin plugs in
//a field filter and skip the mappings filtering step entirely, since it requires parsing mappings into a map.
if (first == MapperPlugin.NOOP_FIELD_FILTER) {
return second;
}
if (second == MapperPlugin.NOOP_FIELD_FILTER) {
return first;
}
return index -> {
Predicate<String> firstPredicate = first.apply(index);
Predicate<String> secondPredicate = second.apply(index);
if (firstPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) {
return secondPredicate;
}
if (secondPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) {
return firstPredicate;
}
return firstPredicate.and(secondPredicate);
};
}
@Override
protected void configure() {
bind(IndicesStore.class).asEagerSingleton();

View File

@ -127,7 +127,9 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.LongSupplier;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
@ -1262,4 +1264,22 @@ public class IndicesService extends AbstractLifecycleComponent
}
}
}
/**
* Returns a function which, given an index name, returns a predicate which fields must match in order to be returned by the get mappings,
* get index, get field mappings and field capabilities APIs. Useful to filter the fields that such APIs return.
* The predicate receives the field name as its input argument. In case multiple plugins register a field filter through
* {@link org.elasticsearch.plugins.MapperPlugin#getFieldFilter()}, only fields that match all the registered filters will be
* returned by the get mappings, get index, get field mappings and field capabilities APIs.
*/
public Function<String, Predicate<String>> getFieldFilter() {
return mapperRegistry.getFieldFilter();
}
/**
* Returns true if the provided field is a registered metadata field (including ones registered via plugins), false otherwise.
*/
public boolean isMetaDataField(String field) {
return mapperRegistry.isMetaDataField(field);
}
}

View File

@ -21,10 +21,13 @@ package org.elasticsearch.indices.mapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.plugins.MapperPlugin;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;
/**
* A registry for all field mappers.
@ -33,11 +36,14 @@ public final class MapperRegistry {
private final Map<String, Mapper.TypeParser> mapperParsers;
private final Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers;
private final Function<String, Predicate<String>> fieldFilter;
public MapperRegistry(Map<String, Mapper.TypeParser> mapperParsers,
Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers) {
Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers, Function<String, Predicate<String>> fieldFilter) {
this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers));
this.metadataMapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(metadataMapperParsers));
this.fieldFilter = fieldFilter;
}
/**
@ -55,4 +61,22 @@ public final class MapperRegistry {
public Map<String, MetadataFieldMapper.TypeParser> getMetadataMapperParsers() {
return metadataMapperParsers;
}
/**
* Returns true if the provided field is a registered metadata field, false otherwise
*/
public boolean isMetaDataField(String field) {
return getMetadataMapperParsers().containsKey(field);
}
/**
* Returns a function that, given an index name, returns a predicate that fields must match in order to be returned by the get mappings,
* get index, get field mappings and field capabilities APIs. Useful to filter the fields that these APIs return.
* The predicate receives the field name as its input argument. In case multiple plugins register a field filter through
* {@link MapperPlugin#getFieldFilter()}, only fields that match all the registered filters will be returned by the get mappings,
* get index, get field mappings and field capabilities APIs.
*/
public Function<String, Predicate<String>> getFieldFilter() {
return fieldFilter;
}
}
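As a hedged usage sketch (the index name, field names, and the filter itself are illustrative; a real filter comes from the plugins the registry was built with), this is the shape of what getFieldFilter() hands back:
import java.util.function.Function;
import java.util.function.Predicate;

public class FieldFilterDemo {
    public static void main(String[] args) {
        // Shape of MapperRegistry#getFieldFilter(): index name -> field predicate.
        Function<String, Predicate<String>> fieldFilter =
            index -> "logs".equals(index) ? field -> field.startsWith("secret_") == false
                                          : field -> true;
        Predicate<String> logsFilter = fieldFilter.apply("logs");
        System.out.println(logsFilter.test("message"));      // true  -> field is returned
        System.out.println(logsFilter.test("secret_token")); // false -> field is hidden
    }
}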

View File

@ -149,12 +149,13 @@ public class RecoverySourceHandler {
final Translog translog = shard.getTranslog();
final long startingSeqNo;
final long requiredSeqNoRangeStart;
final boolean isSequenceNumberBasedRecoveryPossible = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO &&
isTargetSameHistory() && isTranslogReadyForSequenceNumberBasedRecovery();
if (isSequenceNumberBasedRecoveryPossible) {
logger.trace("performing sequence numbers based recovery. starting at [{}]", request.startingSeqNo());
startingSeqNo = request.startingSeqNo();
requiredSeqNoRangeStart = startingSeqNo;
} else {
final Engine.IndexCommitRef phase1Snapshot;
try {
@ -162,10 +163,12 @@ public class RecoverySourceHandler {
} catch (final Exception e) {
throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e);
}
// we set this to unassigned to create a translog roughly according to the retention policy
// on the target
startingSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
// we set this to 0 to create a translog roughly according to the retention policy
// on the target. Note that it will still filter out legacy operations with no sequence numbers
startingSeqNo = 0;
// but we must have everything above the local checkpoint in the commit
requiredSeqNoRangeStart =
Long.parseLong(phase1Snapshot.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1;
try {
phase1(phase1Snapshot.getIndexCommit(), translog::totalOperations);
} catch (final Exception e) {
@ -178,6 +181,9 @@ public class RecoverySourceHandler {
}
}
}
assert startingSeqNo >= 0 : "startingSeqNo must be non-negative. got: " + startingSeqNo;
assert requiredSeqNoRangeStart >= startingSeqNo : "requiredSeqNoRangeStart [" + requiredSeqNoRangeStart + "] is lower than ["
+ startingSeqNo + "]";
runUnderPrimaryPermit(() -> shard.initiateTracking(request.targetAllocationId()));
@ -187,10 +193,19 @@ public class RecoverySourceHandler {
throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e);
}
final long endingSeqNo = shard.seqNoStats().getMaxSeqNo();
/*
* We need to wait for all operations up to the current max to complete, otherwise we cannot guarantee that all
* operations in the required range will be available for replaying from the translog of the source.
*/
cancellableThreads.execute(() -> shard.waitForOpsToComplete(endingSeqNo));
logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo);
logger.trace("snapshot translog for recovery; current size is [{}]", translog.estimateTotalOperationsFromMinSeq(startingSeqNo));
final long targetLocalCheckpoint;
try(Translog.Snapshot snapshot = translog.newSnapshotFromMinSeqNo(startingSeqNo)) {
targetLocalCheckpoint = phase2(startingSeqNo, snapshot);
targetLocalCheckpoint = phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, snapshot);
} catch (Exception e) {
throw new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e);
}
@ -224,7 +239,8 @@ public class RecoverySourceHandler {
/**
* Determines if the source translog is ready for a sequence-number-based peer recovery. The main condition here is that the source
* translog contains all operations between the local checkpoint on the target and the current maximum sequence number on the source.
* translog contains all operations above the local checkpoint on the target. We already know that the translog contains
* or will contain all ops above the source local checkpoint, so we can stop checking there.
*
* @return {@code true} if the source is ready for a sequence-number-based recovery
* @throws IOException if an I/O exception occurred reading the translog snapshot
@ -232,18 +248,10 @@ public class RecoverySourceHandler {
boolean isTranslogReadyForSequenceNumberBasedRecovery() throws IOException {
final long startingSeqNo = request.startingSeqNo();
assert startingSeqNo >= 0;
final long endingSeqNo = shard.seqNoStats().getMaxSeqNo();
logger.trace("testing sequence numbers in range: [{}, {}]", startingSeqNo, endingSeqNo);
final long localCheckpoint = shard.getLocalCheckpoint();
logger.trace("testing sequence numbers in range: [{}, {}]", startingSeqNo, localCheckpoint);
// the start recovery request is initialized with the starting sequence number set to the target shard's local checkpoint plus one
if (startingSeqNo - 1 <= endingSeqNo) {
/*
* We need to wait for all operations up to the current max to complete, otherwise we can not guarantee that all
* operations in the required range will be available for replaying from the translog of the source.
*/
cancellableThreads.execute(() -> shard.waitForOpsToComplete(endingSeqNo));
logger.trace("all operations up to [{}] completed, checking translog content", endingSeqNo);
if (startingSeqNo - 1 <= localCheckpoint) {
final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1);
try (Translog.Snapshot snapshot = shard.getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) {
Translog.Operation operation;
@ -253,7 +261,7 @@ public class RecoverySourceHandler {
}
}
}
return tracker.getCheckpoint() >= endingSeqNo;
return tracker.getCheckpoint() >= localCheckpoint;
} else {
return false;
}
@ -435,11 +443,13 @@ public class RecoverySourceHandler {
*
* @param startingSeqNo the sequence number to start recovery from, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} if all
* ops should be sent
* @param requiredSeqNoRangeStart the lower sequence number of the required range (ending with endingSeqNo)
* @param endingSeqNo the highest sequence number that should be sent
* @param snapshot a snapshot of the translog
*
* @return the local checkpoint on the target
*/
long phase2(final long startingSeqNo, final Translog.Snapshot snapshot) throws IOException {
long phase2(final long startingSeqNo, long requiredSeqNoRangeStart, long endingSeqNo, final Translog.Snapshot snapshot)
throws IOException {
if (shard.state() == IndexShardState.CLOSED) {
throw new IndexShardClosedException(request.shardId());
}
@ -447,10 +457,11 @@ public class RecoverySourceHandler {
final StopWatch stopWatch = new StopWatch().start();
logger.trace("recovery [phase2]: sending transaction log operations");
logger.trace("recovery [phase2]: sending transaction log operations (seq# from [" + startingSeqNo + "], " +
"required [" + requiredSeqNoRangeStart + ":" + endingSeqNo + "]");
// send all the snapshot's translog operations to the target
final SendSnapshotResult result = sendSnapshot(startingSeqNo, snapshot);
final SendSnapshotResult result = sendSnapshot(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, snapshot);
stopWatch.stop();
logger.trace("recovery [phase2]: took [{}]", stopWatch.totalTime());
@ -512,17 +523,25 @@ public class RecoverySourceHandler {
* Operations are bulked into a single request depending on an operation count limit or size-in-bytes limit.
*
* @param startingSeqNo the sequence number for which only operations with a sequence number greater than this will be sent
* @param snapshot the translog snapshot to replay operations from
* @return the local checkpoint on the target and the total number of operations sent
* @param requiredSeqNoRangeStart the lower sequence number of the required range
* @param endingSeqNo the upper bound of the sequence number range to be sent (inclusive)
* @param snapshot the translog snapshot to replay operations from
* @return the local checkpoint on the target and the total number of operations sent
* @throws IOException if an I/O exception occurred reading the translog snapshot
*/
protected SendSnapshotResult sendSnapshot(final long startingSeqNo, final Translog.Snapshot snapshot) throws IOException {
protected SendSnapshotResult sendSnapshot(final long startingSeqNo, long requiredSeqNoRangeStart, long endingSeqNo,
final Translog.Snapshot snapshot) throws IOException {
assert requiredSeqNoRangeStart <= endingSeqNo + 1:
"requiredSeqNoRangeStart " + requiredSeqNoRangeStart + " is larger than endingSeqNo " + endingSeqNo;
assert startingSeqNo <= requiredSeqNoRangeStart :
"startingSeqNo " + startingSeqNo + " is larger than requiredSeqNoRangeStart " + requiredSeqNoRangeStart;
int ops = 0;
long size = 0;
int skippedOps = 0;
int totalSentOps = 0;
final AtomicLong targetLocalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO);
final List<Translog.Operation> operations = new ArrayList<>();
final LocalCheckpointTracker requiredOpsTracker = new LocalCheckpointTracker(endingSeqNo, requiredSeqNoRangeStart - 1);
final int expectedTotalOps = snapshot.totalOperations();
if (expectedTotalOps == 0) {
@ -539,12 +558,9 @@ public class RecoverySourceHandler {
throw new IndexShardClosedException(request.shardId());
}
cancellableThreads.checkForCancel();
/*
* If we are doing a sequence-number-based recovery, we have to skip older ops for which no sequence number was assigned, and
* any ops before the starting sequence number.
*/
final long seqNo = operation.seqNo();
if (startingSeqNo >= 0 && (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || seqNo < startingSeqNo)) {
if (seqNo < startingSeqNo || seqNo > endingSeqNo) {
skippedOps++;
continue;
}
@ -552,6 +568,7 @@ public class RecoverySourceHandler {
ops++;
size += operation.estimateSize();
totalSentOps++;
requiredOpsTracker.markSeqNoAsCompleted(seqNo);
// check if this request is past bytes threshold, and if so, send it off
if (size >= chunkSizeInBytes) {
@ -572,6 +589,12 @@ public class RecoverySourceHandler {
: String.format(Locale.ROOT, "expected total [%d], overridden [%d], skipped [%d], total sent [%d]",
expectedTotalOps, snapshot.overriddenOperations(), skippedOps, totalSentOps);
if (requiredOpsTracker.getCheckpoint() < endingSeqNo) {
throw new IllegalStateException("translog replay failed to cover required sequence numbers" +
" (required range [" + requiredSeqNoRangeStart + ":" + endingSeqNo + "). first missing op is ["
+ (requiredOpsTracker.getCheckpoint() + 1) + "]");
}
logger.trace("sent final batch of [{}][{}] (total: [{}]) translog operations", ops, new ByteSizeValue(size), expectedTotalOps);
return new SendSnapshotResult(targetLocalCheckpoint.get(), totalSentOps);
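The gap check above leans on LocalCheckpointTracker semantics: the checkpoint only advances over a contiguous run of completed sequence numbers. A minimal sketch, with an illustrative required range of [5..7]:
// tracker seeded so its checkpoint sits just below the required range, as in sendSnapshot
LocalCheckpointTracker tracker = new LocalCheckpointTracker(7, 4);
tracker.markSeqNoAsCompleted(5);
tracker.markSeqNoAsCompleted(7); // 6 was never replayed
// getCheckpoint() is stuck at 5, i.e. below endingSeqNo (7), so the recovery fails
// and reports getCheckpoint() + 1 == 6 as the first missing operation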

View File

@ -100,7 +100,6 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.recovery.PeerRecoverySourceService;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoverySettings;
@ -449,6 +448,11 @@ public class Node implements Closeable {
transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(),
httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter(), responseCollectorService,
searchTransportService);
final SearchService searchService = newSearchService(clusterService, indicesService,
threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase(),
responseCollectorService);
modules.add(b -> {
b.bind(Node.class).toInstance(this);
b.bind(NodeService.class).toInstance(nodeService);
@ -470,12 +474,10 @@ public class Node implements Closeable {
b.bind(MetaDataUpgrader.class).toInstance(metaDataUpgrader);
b.bind(MetaStateService.class).toInstance(metaStateService);
b.bind(IndicesService.class).toInstance(indicesService);
b.bind(SearchService.class).toInstance(newSearchService(clusterService, indicesService,
threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase(),
responseCollectorService));
b.bind(SearchService.class).toInstance(searchService);
b.bind(SearchTransportService.class).toInstance(searchTransportService);
b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings, bigArrays,
scriptModule.getScriptService()));
b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings,
searchService::createReduceContext));
b.bind(Transport.class).toInstance(transport);
b.bind(TransportService.class).toInstance(transportService);
b.bind(NetworkService.class).toInstance(networkService);

View File

@ -19,12 +19,14 @@
package org.elasticsearch.plugins;
import java.util.Collections;
import java.util.Map;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import java.util.Collections;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;
/**
* An extension point for {@link Plugin} implementations to add custom mappers
*/
@ -32,7 +34,7 @@ public interface MapperPlugin {
/**
* Returns additional mapper implementations added by this plugin.
*
* <p>
* The key of the returned {@link Map} is the unique name for the mapper which will be used
* as the mapping {@code type}, and the value is a {@link Mapper.TypeParser} to parse the
* mapper settings into a {@link Mapper}.
@ -43,7 +45,7 @@ public interface MapperPlugin {
/**
* Returns additional metadata mapper implementations added by this plugin.
*
* <p>
* The key of the returned {@link Map} is the unique name for the metadata mapper, which
* is used in the mapping json to configure the metadata mapper, and the value is a
* {@link MetadataFieldMapper.TypeParser} to parse the mapper settings into a
@ -52,4 +54,25 @@ public interface MapperPlugin {
default Map<String, MetadataFieldMapper.TypeParser> getMetadataMappers() {
return Collections.emptyMap();
}
/**
* Returns a function that, given an index name, returns a predicate that fields must match in order to be returned by the get mappings,
* get index, get field mappings and field capabilities APIs. Useful to filter the fields that these APIs return. The predicate receives
* the field name as its input argument and should return true to show the field and false to hide it.
*/
default Function<String, Predicate<String>> getFieldFilter() {
return NOOP_FIELD_FILTER;
}
/**
* The default field predicate applied, which doesn't filter anything. That means that by default the get mappings, get index,
* get field mappings and field capabilities APIs will return every field that's present in the mappings.
*/
Predicate<String> NOOP_FIELD_PREDICATE = field -> true;
/**
* The default field filter applied, which doesn't filter anything. That means that by default the get mappings, get index,
* get field mappings and field capabilities APIs will return every field that's present in the mappings.
*/
Function<String, Predicate<String>> NOOP_FIELD_FILTER = index -> NOOP_FIELD_PREDICATE;
}
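A minimal sketch of a plugin wiring in a field filter (the class name and field prefix are made up; Plugin is the usual plugin base class):
public class FieldFilterPlugin extends Plugin implements MapperPlugin {
    @Override
    public Function<String, Predicate<String>> getFieldFilter() {
        // Hide every field whose name starts with "internal_", in all indices.
        return index -> field -> field.startsWith("internal_") == false;
    }
}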

View File

@ -24,6 +24,7 @@ import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
@ -93,12 +94,8 @@ public class RestMultiSearchAction extends BaseRestHandler {
parseMultiLineRequest(restRequest, multiRequest.indicesOptions(), allowExplicitIndex, (searchRequest, parser) -> {
try {
searchRequest.source(SearchSourceBuilder.fromXContent(parser));
multiRequest.add(searchRequest);
} catch (IOException e) {
throw new ElasticsearchParseException("Exception when parsing search request", e);
}
});
List<SearchRequest> requests = multiRequest.requests();
preFilterShardSize = Math.max(1, preFilterShardSize / (requests.size()+1));
@ -113,7 +110,7 @@ public class RestMultiSearchAction extends BaseRestHandler {
* Parses a multi-line {@link RestRequest} body, instantiating a {@link SearchRequest} for each line and applying the given consumer.
*/
public static void parseMultiLineRequest(RestRequest request, IndicesOptions indicesOptions, boolean allowExplicitIndex,
BiConsumer<SearchRequest, XContentParser> consumer) throws IOException {
CheckedBiConsumer<SearchRequest, XContentParser, IOException> consumer) throws IOException {
String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
String[] types = Strings.splitStringByCommaToArray(request.param("type"));
@ -123,83 +120,8 @@ public class RestMultiSearchAction extends BaseRestHandler {
final Tuple<XContentType, BytesReference> sourceTuple = request.contentOrSourceParam();
final XContent xContent = sourceTuple.v1().xContent();
final BytesReference data = sourceTuple.v2();
int from = 0;
int length = data.length();
byte marker = xContent.streamSeparator();
while (true) {
int nextMarker = findNextMarker(marker, from, data, length);
if (nextMarker == -1) {
break;
}
// support first line with \n
if (nextMarker == 0) {
from = nextMarker + 1;
continue;
}
SearchRequest searchRequest = new SearchRequest();
if (indices != null) {
searchRequest.indices(indices);
}
if (indicesOptions != null) {
searchRequest.indicesOptions(indicesOptions);
}
if (types != null && types.length > 0) {
searchRequest.types(types);
}
if (routing != null) {
searchRequest.routing(routing);
}
if (searchType != null) {
searchRequest.searchType(searchType);
}
IndicesOptions defaultOptions = IndicesOptions.strictExpandOpenAndForbidClosed();
// now parse the action
if (nextMarker - from > 0) {
try (XContentParser parser = xContent.createParser(request.getXContentRegistry(), data.slice(from, nextMarker - from))) {
Map<String, Object> source = parser.map();
for (Map.Entry<String, Object> entry : source.entrySet()) {
Object value = entry.getValue();
if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) {
if (!allowExplicitIndex) {
throw new IllegalArgumentException("explicit index in multi search is not allowed");
}
searchRequest.indices(nodeStringArrayValue(value));
} else if ("type".equals(entry.getKey()) || "types".equals(entry.getKey())) {
searchRequest.types(nodeStringArrayValue(value));
} else if ("search_type".equals(entry.getKey()) || "searchType".equals(entry.getKey())) {
searchRequest.searchType(nodeStringValue(value, null));
} else if ("request_cache".equals(entry.getKey()) || "requestCache".equals(entry.getKey())) {
searchRequest.requestCache(nodeBooleanValue(value, entry.getKey()));
} else if ("preference".equals(entry.getKey())) {
searchRequest.preference(nodeStringValue(value, null));
} else if ("routing".equals(entry.getKey())) {
searchRequest.routing(nodeStringValue(value, null));
}
}
defaultOptions = IndicesOptions.fromMap(source, defaultOptions);
}
}
searchRequest.indicesOptions(defaultOptions);
// move pointers
from = nextMarker + 1;
// now for the body
nextMarker = findNextMarker(marker, from, data, length);
if (nextMarker == -1) {
break;
}
BytesReference bytes = data.slice(from, nextMarker - from);
try (XContentParser parser = xContent.createParser(request.getXContentRegistry(), bytes)) {
consumer.accept(searchRequest, parser);
}
// move pointers
from = nextMarker + 1;
}
MultiSearchRequest.readMultiLineFormat(data, xContent, consumer, indices, indicesOptions, types, routing,
searchType, request.getXContentRegistry(), allowExplicitIndex);
}
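With a CheckedBiConsumer the parsing lambda may throw IOException directly, which is what allowed the try/catch wrapping above to be dropped. A sketch of a call site (restRequest, multiRequest, and allowExplicitIndex as in the surrounding handler):
parseMultiLineRequest(restRequest, multiRequest.indicesOptions(), allowExplicitIndex,
    (searchRequest, parser) -> {
        searchRequest.source(SearchSourceBuilder.fromXContent(parser)); // may throw IOException
        multiRequest.add(searchRequest);
    });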
@Override
@ -207,18 +129,6 @@ public class RestMultiSearchAction extends BaseRestHandler {
return true;
}
private static int findNextMarker(byte marker, int from, BytesReference data, int length) {
for (int i = from; i < length; i++) {
if (data.get(i) == marker) {
return i;
}
}
if (from != length) {
throw new IllegalArgumentException("The msearch request must be terminated by a newline [\n]");
}
return -1;
}
@Override
protected Set<String> responseParams() {
return RESPONSE_PARAMS;

View File

@ -60,6 +60,8 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.search.aggregations.AggregationInitializationException;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.collapse.CollapseContext;
@ -118,6 +120,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
Setting.positiveTimeSetting("search.max_keep_alive", timeValueHours(24), Property.NodeScope, Property.Dynamic);
public static final Setting<TimeValue> KEEPALIVE_INTERVAL_SETTING =
Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), Property.NodeScope);
/**
* Enables low-level, frequent search cancellation checks. Enabling low-level checks will make long-running searches react
* to the cancellation request faster. However, since it will produce more cancellation checks it might slow the search performance
@ -163,6 +166,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
private final ConcurrentMapLong<SearchContext> activeContexts = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();
private final MultiBucketConsumerService multiBucketConsumerService;
public SearchService(ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, FetchPhase fetchPhase,
ResponseCollectorService responseCollectorService) {
@ -175,6 +180,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
this.bigArrays = bigArrays;
this.queryPhase = new QueryPhase(settings);
this.fetchPhase = fetchPhase;
this.multiBucketConsumerService = new MultiBucketConsumerService(clusterService, settings);
TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings);
setKeepAlives(DEFAULT_KEEPALIVE_SETTING.get(settings), MAX_KEEPALIVE_SETTING.get(settings));
@ -741,7 +747,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
if (source.aggregations() != null) {
try {
AggregatorFactories factories = source.aggregations().build(context, null);
context.aggregations(new SearchContextAggregations(factories));
context.aggregations(new SearchContextAggregations(factories, multiBucketConsumerService.create()));
} catch (IOException e) {
throw new AggregationInitializationException("Failed to create aggregators", e);
}
@ -1017,4 +1023,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
public IndicesService getIndicesService() {
return indicesService;
}
public InternalAggregation.ReduceContext createReduceContext(boolean finalReduce) {
return new InternalAggregation.ReduceContext(bigArrays, scriptService, multiBucketConsumerService.create(), finalReduce);
}
}
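A hedged sketch of the two call shapes (searchService as bound above): contexts created with finalReduce = false defer work that must happen exactly once, such as the min_doc_count pruning visible in the histogram reduce paths below.
InternalAggregation.ReduceContext partial = searchService.createReduceContext(false);
InternalAggregation.ReduceContext finalReduce = searchService.createReduceContext(true);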

View File

@ -123,6 +123,7 @@ public class AggregationPhase implements SearchPhase {
}
List<InternalAggregation> aggregations = new ArrayList<>(aggregators.length);
context.aggregations().resetBucketMultiConsumer();
for (Aggregator aggregator : context.aggregations().aggregators()) {
try {
aggregator.postCollection();

View File

@ -22,6 +22,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.BigArray;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.action.search.RestSearchAction;
@ -33,6 +34,7 @@ import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.IntConsumer;
/**
* An internal implementation of {@link Aggregation}. Serves as a base class for all aggregation implementations.
@ -43,11 +45,17 @@ public abstract class InternalAggregation implements Aggregation, NamedWriteable
private final BigArrays bigArrays;
private final ScriptService scriptService;
private final IntConsumer multiBucketConsumer;
private final boolean isFinalReduce;
public ReduceContext(BigArrays bigArrays, ScriptService scriptService, boolean isFinalReduce) {
this(bigArrays, scriptService, (s) -> {}, isFinalReduce);
}
public ReduceContext(BigArrays bigArrays, ScriptService scriptService, IntConsumer multiBucketConsumer, boolean isFinalReduce) {
this.bigArrays = bigArrays;
this.scriptService = scriptService;
this.multiBucketConsumer = multiBucketConsumer;
this.isFinalReduce = isFinalReduce;
}
@ -67,6 +75,14 @@ public abstract class InternalAggregation implements Aggregation, NamedWriteable
public ScriptService scriptService() {
return scriptService;
}
/**
* Adds <tt>size</tt> buckets to the global count for the request and fails if this number is greater than
* the maximum number of buckets allowed in a response
*/
public void consumeBucketsAndMaybeBreak(int size) {
multiBucketConsumer.accept(size);
}
}
protected final String name;

View File

@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregation;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import java.io.IOException;
@ -82,6 +83,39 @@ public abstract class InternalMultiBucketAggregation<A extends InternalMultiBuck
}
}
/**
* Counts the number of inner buckets inside the provided {@link InternalBucket}
*/
public static int countInnerBucket(InternalBucket bucket) {
int count = 0;
for (Aggregation agg : bucket.getAggregations().asList()) {
count += countInnerBucket(agg);
}
return count;
}
/**
* Counts the number of inner buckets inside the provided {@link Aggregation}
*/
public static int countInnerBucket(Aggregation agg) {
int size = 0;
if (agg instanceof MultiBucketsAggregation) {
MultiBucketsAggregation multi = (MultiBucketsAggregation) agg;
for (MultiBucketsAggregation.Bucket bucket : multi.getBuckets()) {
++ size;
for (Aggregation bucketAgg : bucket.getAggregations().asList()) {
size += countInnerBucket(bucketAgg);
}
}
} else if (agg instanceof SingleBucketAggregation) {
SingleBucketAggregation single = (SingleBucketAggregation) agg;
for (Aggregation bucketAgg : single.getAggregations().asList()) {
size += countInnerBucket(bucketAgg);
}
}
return size;
}
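These counters exist so that reduce paths can keep the global bucket budget accurate when buckets are pruned: a kept bucket is charged as one, while a dropped bucket refunds its entire subtree. The pattern, as used by the histogram and terms reductions below:
if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
    reduceContext.consumeBucketsAndMaybeBreak(1);                          // keep: charge one bucket
    reducedBuckets.add(reduced);
} else {
    reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(reduced)); // drop: refund its sub-buckets
}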
public abstract static class InternalBucket implements Bucket, Writeable {
public Object getProperty(String containingAggName, List<String> path) {

View File

@ -0,0 +1,126 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
import java.io.IOException;
import java.util.function.IntConsumer;
/**
* An aggregation service that creates instances of {@link MultiBucketConsumer}.
* The consumer is used by {@link BucketsAggregator} and {@link InternalMultiBucketAggregation} to limit the number of buckets created
* in {@link Aggregator#buildAggregation} and {@link InternalAggregation#reduce}.
* The limit can be set by changing the `search.max_buckets` cluster setting and defaults to 10000.
*/
public class MultiBucketConsumerService {
public static final int DEFAULT_MAX_BUCKETS = 10000;
public static final Setting<Integer> MAX_BUCKET_SETTING =
Setting.intSetting("search.max_buckets", DEFAULT_MAX_BUCKETS, 0, Setting.Property.NodeScope, Setting.Property.Dynamic);
private volatile int maxBucket;
public MultiBucketConsumerService(ClusterService clusterService, Settings settings) {
this.maxBucket = MAX_BUCKET_SETTING.get(settings);
clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_BUCKET_SETTING, this::setMaxBucket);
}
private void setMaxBucket(int maxBucket) {
this.maxBucket = maxBucket;
}
public static class TooManyBucketsException extends AggregationExecutionException {
private final int maxBuckets;
public TooManyBucketsException(String message, int maxBuckets) {
super(message);
this.maxBuckets = maxBuckets;
}
public TooManyBucketsException(StreamInput in) throws IOException {
super(in);
maxBuckets = in.readInt();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeInt(maxBuckets);
}
public int getMaxBuckets() {
return maxBuckets;
}
@Override
public RestStatus status() {
return RestStatus.SERVICE_UNAVAILABLE;
}
@Override
protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("max_buckets", maxBuckets);
}
}
/**
* An {@link IntConsumer} that throws a {@link TooManyBucketsException}
* when the sum of the provided values is above the limit (`search.max_buckets`).
* It is used by aggregators to limit the number of buckets created during
* {@link Aggregator#buildAggregation} and {@link InternalAggregation#reduce}.
*/
public static class MultiBucketConsumer implements IntConsumer {
private final int limit;
// aggregations execute in a single thread so no atomic here
private int count;
public MultiBucketConsumer(int limit) {
this.limit = limit;
}
@Override
public void accept(int value) {
count += value;
if (count > limit) {
throw new TooManyBucketsException("Trying to create too many buckets. Must be less than or equal to: [" + limit
+ "] but was [" + count + "]. This limit can be set by changing the [" +
MAX_BUCKET_SETTING.getKey() + "] cluster level setting.", limit);
}
}
public void reset() {
this.count = 0;
}
public int getCount() {
return count;
}
}
public MultiBucketConsumer create() {
return new MultiBucketConsumer(maxBucket);
}
}
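A self-contained sketch of the consumer's behavior (the limit of 3 is illustrative; real limits come from the search.max_buckets setting):
MultiBucketConsumerService.MultiBucketConsumer consumer =
    new MultiBucketConsumerService.MultiBucketConsumer(3);
consumer.accept(2); // running count: 2, under the limit
try {
    consumer.accept(2); // running count: 4 > 3
} catch (MultiBucketConsumerService.TooManyBucketsException e) {
    // surfaced to clients as HTTP 503 with a "max_buckets" field in the error metadata
}
consumer.reset(); // the count is reset when the consumer is reused, e.g. per aggregation phase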

View File

@ -18,19 +18,25 @@
*/
package org.elasticsearch.search.aggregations;
import java.util.function.IntConsumer;
import static org.elasticsearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer;
/**
* The aggregation context that is part of the search context.
*/
public class SearchContextAggregations {
private final AggregatorFactories factories;
private final MultiBucketConsumer multiBucketConsumer;
private Aggregator[] aggregators;
/**
* Creates a new aggregation context with the parsed aggregator factories
*/
public SearchContextAggregations(AggregatorFactories factories) {
public SearchContextAggregations(AggregatorFactories factories, MultiBucketConsumer multiBucketConsumer) {
this.factories = factories;
this.multiBucketConsumer = multiBucketConsumer;
}
public AggregatorFactories factories() {
@ -50,4 +56,15 @@ public class SearchContextAggregations {
this.aggregators = aggregators;
}
/**
* Returns a consumer for multi-bucket aggregations that checks the total number of buckets
* created in the response
*/
public IntConsumer multiBucketConsumer() {
return multiBucketConsumer;
}
void resetBucketMultiConsumer() {
multiBucketConsumer.reset();
}
}

View File

@ -34,10 +34,12 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.IntConsumer;
public abstract class BucketsAggregator extends AggregatorBase {
private final BigArrays bigArrays;
private final IntConsumer multiBucketConsumer;
private IntArray docCounts;
public BucketsAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent,
@ -45,6 +47,11 @@ public abstract class BucketsAggregator extends AggregatorBase {
super(name, factories, context, parent, pipelineAggregators, metaData);
bigArrays = context.bigArrays();
docCounts = bigArrays.newIntArray(1, true);
if (context.aggregations() != null) {
multiBucketConsumer = context.aggregations().multiBucketConsumer();
} else {
multiBucketConsumer = (count) -> {};
}
}
/**
@ -104,6 +111,14 @@ public abstract class BucketsAggregator extends AggregatorBase {
}
}
/**
* Adds <tt>count</tt> buckets to the global count for the request and fails if this number is greater than
* the maximum number of buckets allowed in a response
*/
protected final void consumeBucketsAndMaybeBreak(int count) {
multiBucketConsumer.accept(count);
}
/**
* Required method to build the child aggregations of the given bucket (identified by the bucket ordinal).
*/

View File

@ -210,6 +210,7 @@ public class AdjacencyMatrixAggregator extends BucketsAggregator {
InternalAdjacencyMatrix.InternalBucket bucket = new InternalAdjacencyMatrix.InternalBucket(keys[i],
docCount, bucketAggregations(bucketOrd));
buckets.add(bucket);
consumeBucketsAndMaybeBreak(1);
}
}
int pos = keys.length;
@ -223,6 +224,7 @@ public class AdjacencyMatrixAggregator extends BucketsAggregator {
InternalAdjacencyMatrix.InternalBucket bucket = new InternalAdjacencyMatrix.InternalBucket(intersectKey,
docCount, bucketAggregations(bucketOrd));
buckets.add(bucket);
consumeBucketsAndMaybeBreak(1);
}
pos++;
}

View File

@ -214,7 +214,10 @@ public class InternalAdjacencyMatrix
for (List<InternalBucket> sameRangeList : bucketsMap.values()) {
InternalBucket reducedBucket = sameRangeList.get(0).reduce(sameRangeList, reduceContext);
if(reducedBucket.docCount >= 1){
reduceContext.consumeBucketsAndMaybeBreak(1);
reducedBuckets.add(reducedBucket);
} else {
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(reducedBucket));
}
}
Collections.sort(reducedBuckets, Comparator.comparing(InternalBucket::getKey));

View File

@ -83,6 +83,7 @@ final class CompositeAggregator extends BucketsAggregator {
@Override
public InternalAggregation buildAggregation(long zeroBucket) throws IOException {
assert zeroBucket == 0L;
consumeBucketsAndMaybeBreak(keys.size());
// Replay all documents that contain at least one top bucket (collected during the first pass).
grow(keys.size()+1);

View File

@ -132,6 +132,7 @@ public class InternalComposite
if (lastBucket != null && bucketIt.current.compareKey(lastBucket) != 0) {
InternalBucket reduceBucket = buckets.get(0).reduce(buckets, reduceContext);
buckets.clear();
reduceContext.consumeBucketsAndMaybeBreak(1);
result.add(reduceBucket);
if (result.size() >= size) {
break;
@ -145,6 +146,7 @@ public class InternalComposite
}
if (buckets.size() > 0) {
InternalBucket reduceBucket = buckets.get(0).reduce(buckets, reduceContext);
reduceContext.consumeBucketsAndMaybeBreak(1);
result.add(reduceBucket);
}
return new InternalComposite(name, size, sourceNames, result, reverseMuls, pipelineAggregators(), metaData);

View File

@ -166,6 +166,7 @@ public class FiltersAggregator extends BucketsAggregator {
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
consumeBucketsAndMaybeBreak(keys.length + (showOtherBucket ? 1 : 0));
List<InternalFilters.InternalBucket> buckets = new ArrayList<>(keys.length);
for (int i = 0; i < keys.length; i++) {
long bucketOrd = bucketOrd(owningBucketOrdinal, i);

View File

@ -223,7 +223,8 @@ public class InternalFilters extends InternalMultiBucketAggregation<InternalFilt
}
}
InternalFilters reduced = new InternalFilters(name, new ArrayList<InternalBucket>(bucketsList.size()), keyed, pipelineAggregators(),
reduceContext.consumeBucketsAndMaybeBreak(bucketsList.size());
InternalFilters reduced = new InternalFilters(name, new ArrayList<>(bucketsList.size()), keyed, pipelineAggregators(),
getMetaData());
for (List<InternalBucket> sameRangeList : bucketsList) {
reduced.buckets.add((sameRangeList.get(0)).reduce(sameRangeList, reduceContext));

View File

@ -106,6 +106,7 @@ public class GeoHashGridAggregator extends BucketsAggregator {
public InternalGeoHashGrid buildAggregation(long owningBucketOrdinal) throws IOException {
assert owningBucketOrdinal == 0;
final int size = (int) Math.min(bucketOrds.size(), shardSize);
consumeBucketsAndMaybeBreak(size);
InternalGeoHashGrid.BucketPriorityQueue ordered = new InternalGeoHashGrid.BucketPriorityQueue(size);
OrdinalBucket spare = null;

View File

@ -211,7 +211,12 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation<Internal
BucketPriorityQueue ordered = new BucketPriorityQueue(size);
for (LongObjectPagedHashMap.Cursor<List<Bucket>> cursor : buckets) {
List<Bucket> sameCellBuckets = cursor.value;
ordered.insertWithOverflow(sameCellBuckets.get(0).reduce(sameCellBuckets, reduceContext));
Bucket removed = ordered.insertWithOverflow(sameCellBuckets.get(0).reduce(sameCellBuckets, reduceContext));
if (removed != null) {
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(removed));
} else {
reduceContext.consumeBucketsAndMaybeBreak(1);
}
}
buckets.close();
Bucket[] list = new Bucket[ordered.size()];

View File

@ -127,6 +127,8 @@ class DateHistogramAggregator extends BucketsAggregator {
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
assert owningBucketOrdinal == 0;
consumeBucketsAndMaybeBreak((int) bucketOrds.size());
List<InternalDateHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());
for (long i = 0; i < bucketOrds.size(); i++) {
buckets.add(new InternalDateHistogram.Bucket(bucketOrds.get(i), bucketDocCount(i), keyed, formatter, bucketAggregations(i)));

View File

@ -131,6 +131,7 @@ class HistogramAggregator extends BucketsAggregator {
@Override
public InternalAggregation buildAggregation(long bucket) throws IOException {
assert bucket == 0;
consumeBucketsAndMaybeBreak((int) bucketOrds.size());
List<InternalHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());
for (long i = 0; i < bucketOrds.size(); i++) {
double roundKey = Double.longBitsToDouble(bucketOrds.get(i));

View File

@ -344,7 +344,10 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
// the key changes, reduce what we already buffered and reset the buffer for current buckets
final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
reduceContext.consumeBucketsAndMaybeBreak(1);
reducedBuckets.add(reduced);
} else {
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(reduced));
}
currentBuckets.clear();
key = top.current.key;
@ -365,7 +368,10 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
if (currentBuckets.isEmpty() == false) {
final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
reduceContext.consumeBucketsAndMaybeBreak(1);
reducedBuckets.add(reduced);
} else {
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(reduced));
}
}
}
@ -388,6 +394,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
long key = bounds.getMin() + offset;
long max = bounds.getMax() + offset;
while (key <= max) {
reduceContext.consumeBucketsAndMaybeBreak(1);
iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
key = nextKey(key).longValue();
}
@ -397,6 +404,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
long key = bounds.getMin() + offset;
if (key < firstBucket.key) {
while (key < firstBucket.key) {
reduceContext.consumeBucketsAndMaybeBreak(1);
iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
key = nextKey(key).longValue();
}
@ -412,6 +420,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
if (lastBucket != null) {
long key = nextKey(lastBucket.key).longValue();
while (key < nextBucket.key) {
reduceContext.consumeBucketsAndMaybeBreak(1);
iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
key = nextKey(key).longValue();
}
@ -425,6 +434,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
long key = nextKey(lastBucket.key).longValue();
long max = bounds.getMax() + offset;
while (key <= max) {
reduceContext.consumeBucketsAndMaybeBreak(1);
iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
key = nextKey(key).longValue();
}

View File

@ -326,7 +326,10 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
// Using Double.compare instead of != to handle NaN correctly.
final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
reduceContext.consumeBucketsAndMaybeBreak(1);
reducedBuckets.add(reduced);
} else {
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(reduced));
}
currentBuckets.clear();
key = top.current.key;
@ -347,7 +350,10 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
if (currentBuckets.isEmpty() == false) {
final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
reduceContext.consumeBucketsAndMaybeBreak(1);
reducedBuckets.add(reduced);
} else {
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(reduced));
}
}
}
@ -374,6 +380,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
if (iter.hasNext() == false) {
// fill with empty buckets
for (double key = round(emptyBucketInfo.minBound); key <= emptyBucketInfo.maxBound; key = nextKey(key)) {
reduceContext.consumeBucketsAndMaybeBreak(1);
iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));
}
} else {
@ -381,6 +388,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
if (Double.isFinite(emptyBucketInfo.minBound)) {
// fill with empty buckets until the first key
for (double key = round(emptyBucketInfo.minBound); key < first.key; key = nextKey(key)) {
reduceContext.consumeBucketsAndMaybeBreak(1);
iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));
}
}
@ -393,6 +401,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
if (lastBucket != null) {
double key = nextKey(lastBucket.key);
while (key < nextBucket.key) {
reduceContext.consumeBucketsAndMaybeBreak(1);
iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));
key = nextKey(key);
}
@ -403,6 +412,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
// finally, adding the empty buckets *after* the actual data (based on the extended_bounds.max requested by the user)
for (double key = nextKey(lastBucket.key); key <= emptyBucketInfo.maxBound; key = nextKey(key)) {
reduceContext.consumeBucketsAndMaybeBreak(1);
iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));
}
}

View File

@ -325,6 +325,7 @@ public final class BinaryRangeAggregator extends BucketsAggregator {
@Override
public InternalAggregation buildAggregation(long bucket) throws IOException {
consumeBucketsAndMaybeBreak(ranges.length);
List<InternalBinaryRange.Bucket> buckets = new ArrayList<>(ranges.length);
for (int i = 0; i < ranges.length; ++i) {
long bucketOrd = bucket * ranges.length + i;

View File

@ -241,6 +241,7 @@ public final class InternalBinaryRange
@Override
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
reduceContext.consumeBucketsAndMaybeBreak(buckets.size());
long[] docCounts = new long[buckets.size()];
InternalAggregations[][] aggs = new InternalAggregations[buckets.size()][];
for (int i = 0; i < aggs.length; ++i) {

View File

@ -302,6 +302,7 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
@SuppressWarnings("unchecked")
@Override
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
reduceContext.consumeBucketsAndMaybeBreak(ranges.size());
List<Bucket>[] rangeList = new List[ranges.size()];
for (int i = 0; i < rangeList.length; ++i) {
rangeList[i] = new ArrayList<>();

View File

@ -323,6 +323,7 @@ public class RangeAggregator extends BucketsAggregator {
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
consumeBucketsAndMaybeBreak(ranges.length);
List<org.elasticsearch.search.aggregations.bucket.range.Range.Bucket> buckets = new ArrayList<>(ranges.length);
for (int i = 0; i < ranges.length; i++) {
Range range = ranges[i];

View File

@ -131,6 +131,9 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri
// global stats
spare.updateScore(significanceHeuristic);
spare = ordered.insertWithOverflow(spare);
if (spare == null) {
consumeBucketsAndMaybeBreak(1);
}
}
final SignificantStringTerms.Bucket[] list = new SignificantStringTerms.Bucket[ordered.size()];

View File

@ -241,7 +241,14 @@ public abstract class InternalSignificantTerms<A extends InternalSignificantTerm
final B b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext);
b.updateScore(heuristic);
if (((b.score > 0) && (b.subsetDf >= minDocCount)) || reduceContext.isFinalReduce() == false) {
ordered.insertWithOverflow(b);
B removed = ordered.insertWithOverflow(b);
if (removed == null) {
reduceContext.consumeBucketsAndMaybeBreak(1);
} else {
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(removed));
}
} else {
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(b));
}
}
B[] list = createBucketsArray(ordered.size());

View File

@ -101,6 +101,9 @@ public class SignificantLongTermsAggregator extends LongTermsAggregator {
spare.bucketOrd = i;
spare = ordered.insertWithOverflow(spare);
if (spare == null) {
consumeBucketsAndMaybeBreak(1);
}
}
final SignificantLongTerms.Bucket[] list = new SignificantLongTerms.Bucket[ordered.size()];

View File

@ -107,6 +107,9 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator {
spare.bucketOrd = i;
spare = ordered.insertWithOverflow(spare);
if (spare == null) {
consumeBucketsAndMaybeBreak(1);
}
}
final SignificantStringTerms.Bucket[] list = new SignificantStringTerms.Bucket[ordered.size()];

View File

@ -221,6 +221,9 @@ public class SignificantTextAggregator extends BucketsAggregator {
spare.bucketOrd = i;
spare = ordered.insertWithOverflow(spare);
if (spare == null) {
consumeBucketsAndMaybeBreak(1);
}
}
final SignificantStringTerms.Bucket[] list = new SignificantStringTerms.Bucket[ordered.size()];

View File

@ -204,6 +204,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
if (bucketCountThresholds.getShardMinDocCount() <= spare.docCount) {
spare = ordered.insertWithOverflow(spare);
if (spare == null) {
consumeBucketsAndMaybeBreak(1);
spare = new OrdBucket(-1, 0, null, showTermDocCountError, 0);
}
}

View File

@ -293,7 +293,12 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
B removed = ordered.insertWithOverflow(b);
if (removed != null) {
otherDocCount += removed.getDocCount();
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(removed));
} else {
reduceContext.consumeBucketsAndMaybeBreak(1);
}
} else {
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(b));
}
}
B[] list = createBucketsArray(ordered.size());

View File

@ -125,7 +125,6 @@ public class LongTermsAggregator extends TermsAggregator {
}
final int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());
long otherDocCount = 0;
BucketPriorityQueue<LongTerms.Bucket> ordered = new BucketPriorityQueue<>(size, order.comparator(this));
LongTerms.Bucket spare = null;
@ -138,7 +137,10 @@ public class LongTermsAggregator extends TermsAggregator {
otherDocCount += spare.docCount;
spare.bucketOrd = i;
if (bucketCountThresholds.getShardMinDocCount() <= spare.docCount) {
spare = (LongTerms.Bucket) ordered.insertWithOverflow(spare);
spare = ordered.insertWithOverflow(spare);
if (spare == null) {
consumeBucketsAndMaybeBreak(1);
}
}
}

View File

@ -144,6 +144,9 @@ public class StringTermsAggregator extends AbstractStringTermsAggregator {
spare.bucketOrd = i;
if (bucketCountThresholds.getShardMinDocCount() <= spare.docCount) {
spare = ordered.insertWithOverflow(spare);
if (spare == null) {
consumeBucketsAndMaybeBreak(1);
}
}
}

View File

@ -31,7 +31,6 @@ import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.search.highlight.TextFragment;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;
@ -48,6 +47,9 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.search.fetch.subphase.highlight.UnifiedHighlighter.convertFieldValue;
import static org.elasticsearch.search.fetch.subphase.highlight.UnifiedHighlighter.getAnalyzer;
public class PlainHighlighter implements Highlighter {
private static final String CACHE_KEY = "highlight-plain";
@ -100,18 +102,12 @@ public class PlainHighlighter implements Highlighter {
int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ? 1 : field.fieldOptions().numberOfFragments();
ArrayList<TextFragment> fragsList = new ArrayList<>();
List<Object> textsToHighlight;
Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer();
Analyzer analyzer = getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), mapper.fieldType());
try {
textsToHighlight = HighlightUtils.loadFieldValues(field, mapper, context, hitContext);
for (Object textToHighlight : textsToHighlight) {
String text;
if (textToHighlight instanceof BytesRef) {
text = mapper.fieldType().valueForDisplay(textToHighlight).toString();
} else {
text = textToHighlight.toString();
}
String text = convertFieldValue(mapper.fieldType(), textToHighlight);
try (TokenStream tokenStream = analyzer.tokenStream(mapper.fieldType().name(), text)) {
if (!tokenStream.hasAttribute(CharTermAttribute.class) || !tokenStream.hasAttribute(OffsetAttribute.class)) {

View File

@ -32,8 +32,11 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.internal.SearchContext;
@ -50,8 +53,6 @@ import java.util.stream.Collectors;
import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR;
public class UnifiedHighlighter implements Highlighter {
private static final String CACHE_KEY = "highlight-unified";
@Override
public boolean canHighlight(FieldMapper fieldMapper) {
return true;
@ -63,36 +64,20 @@ public class UnifiedHighlighter implements Highlighter {
SearchContextHighlight.Field field = highlighterContext.field;
SearchContext context = highlighterContext.context;
FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
if (!hitContext.cache().containsKey(CACHE_KEY)) {
hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
}
HighlighterEntry highlighterEntry = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
MapperHighlighterEntry mapperHighlighterEntry = highlighterEntry.mappers.get(fieldMapper);
if (mapperHighlighterEntry == null) {
Encoder encoder = field.fieldOptions().encoder().equals("html") ?
HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
CustomPassageFormatter passageFormatter =
new CustomPassageFormatter(field.fieldOptions().preTags()[0],
Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.fieldOptions().preTags()[0],
field.fieldOptions().postTags()[0], encoder);
mapperHighlighterEntry = new MapperHighlighterEntry(passageFormatter);
}
List<Snippet> snippets = new ArrayList<>();
int numberOfFragments;
try {
Analyzer analyzer =
context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer();
final Analyzer analyzer =
getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldMapper.fieldType());
List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext);
fieldValues = fieldValues.stream().map(obj -> {
if (obj instanceof BytesRef) {
return fieldMapper.fieldType().valueForDisplay(obj).toString();
} else {
return obj;
}
}).collect(Collectors.toList());
fieldValues = fieldValues.stream()
.map((s) -> convertFieldValue(fieldMapper.fieldType(), s))
.collect(Collectors.toList());
final IndexSearcher searcher = new IndexSearcher(hitContext.reader());
final CustomUnifiedHighlighter highlighter;
final String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR);
@ -102,15 +87,14 @@ public class UnifiedHighlighter implements Highlighter {
// breaks the text on, so we don't lose the distinction between the different values of a field and we
// get back a snippet per value
CustomSeparatorBreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
highlighter = new CustomUnifiedHighlighter(searcher, analyzer, offsetSource,
mapperHighlighterEntry.passageFormatter, field.fieldOptions().boundaryScannerLocale(),
breakIterator, fieldValue, field.fieldOptions().noMatchSize());
highlighter = new CustomUnifiedHighlighter(searcher, analyzer, offsetSource, passageFormatter,
field.fieldOptions().boundaryScannerLocale(), breakIterator, fieldValue, field.fieldOptions().noMatchSize());
numberOfFragments = fieldValues.size(); // we are highlighting the whole content, one snippet per value
} else {
//using paragraph separator we make sure that each field value holds a discrete passage for highlighting
BreakIterator bi = getBreakIterator(field);
highlighter = new CustomUnifiedHighlighter(searcher, analyzer, offsetSource,
mapperHighlighterEntry.passageFormatter, field.fieldOptions().boundaryScannerLocale(), bi,
highlighter = new CustomUnifiedHighlighter(searcher, analyzer, offsetSource, passageFormatter,
field.fieldOptions().boundaryScannerLocale(), bi,
fieldValue, field.fieldOptions().noMatchSize());
numberOfFragments = field.fieldOptions().numberOfFragments();
}
@ -210,6 +194,24 @@ public class UnifiedHighlighter implements Highlighter {
return filteredSnippets;
}
static Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type) {
if (type instanceof KeywordFieldMapper.KeywordFieldType) {
KeywordFieldMapper.KeywordFieldType keywordFieldType = (KeywordFieldMapper.KeywordFieldType) type;
if (keywordFieldType.normalizer() != null) {
return keywordFieldType.normalizer();
}
}
return docMapper.mappers().indexAnalyzer();
}
static String convertFieldValue(MappedFieldType type, Object value) {
if (value instanceof BytesRef) {
return type.valueForDisplay(value).toString();
} else {
return value.toString();
}
}
private static String mergeFieldValues(List<Object> fieldValues, char valuesSeparator) {
//the postings highlighter accepts all values as a single string, since offsets etc. need to match the content
//loaded from stored fields; we therefore merge all values using a proper separator
@ -226,17 +228,4 @@ public class UnifiedHighlighter implements Highlighter {
}
return OffsetSource.ANALYSIS;
}
private static class HighlighterEntry {
Map<FieldMapper, MapperHighlighterEntry> mappers = new HashMap<>();
}
private static class MapperHighlighterEntry {
final CustomPassageFormatter passageFormatter;
private MapperHighlighterEntry(CustomPassageFormatter passageFormatter) {
this.passageFormatter = passageFormatter;
}
}
}

Some files were not shown because too many files have changed in this diff.