Merge branch 'master' into ccr

* master: (24 commits)
  Watcher: Ensure mail message ids are unique per watch action (#30112)
  REST: Remove GET support for clear cache indices (#29525)
  SQL: Correct error message (#30138)
  Require acknowledgement to start_trial license (#30135)
  Fix a bug in FieldCapabilitiesRequest#equals and hashCode. (#30181)
  SQL: Add BinaryMathProcessor to named writeables list (#30127)
  Tests: Use buildDir as base for generated-resources (#30191)
  Fix SliceBuilderTests#testRandom failures
  Build: Fix deb version to use tilde with prerelease versions (#29000)
  Fix edge cases in CompositeKeyExtractorTests (#30175)
  Document time unit limitations for date histograms (#30177)
  Add support for field capabilities to the high-level REST client. (#29664)
  Remove licenses missed by the migration (#30128)
  [DOCS] Updates docker installation package details (#30110)
  Fix TermsSetQueryBuilder.doEquals() method (#29629)
  [Monitoring] Remove unhelpful Monitoring tests (#30144)
  [Test] Fix RenameProcessorTests.testRenameExistingFieldNullValue() (#29655)
  add copyright/scope configuration for intellij to Contributing Guide (#29688)
  [test] include oss tar in packaging tests (#30155)
  TEST: Update settings should go through cluster state (#29682)
  ...
Nhat Nguyen 2018-04-27 09:23:31 -04:00
commit 112b5f1744
95 changed files with 1875 additions and 760 deletions


@@ -107,6 +107,8 @@ We support development in the Eclipse and IntelliJ IDEs. For Eclipse, the
minimum version that we support is [Eclipse Oxygen][eclipse] (version 4.7). For
IntelliJ, the minimum version that we support is [IntelliJ 2017.2][intellij].
### Configuring IDEs And Running Tests
Eclipse users can automatically configure their IDE: `./gradlew eclipse`
then `File: Import: Existing Projects into Workspace`. Select the
option `Search for nested projects`. Additionally you will want to
@@ -144,6 +146,9 @@ For IntelliJ, go to
For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to
`VM Arguments`.
### Java Language Formatting Guidelines
Please follow these formatting guidelines:
* Java indent is 4 spaces
@@ -155,6 +160,33 @@ Please follow these formatting guidelines:
* IntelliJ: `Preferences/Settings->Editor->Code Style->Java->Imports`. There are two configuration options: `Class count to use import with '*'` and `Names count to use static import with '*'`. Set their values to 99999 or some other absurdly high value.
* Don't worry too much about import order. Try not to change it but don't worry about fighting your IDE to stop it from doing so.
### License Headers
We require license headers on all Java files. You will notice that the Java files in
the top-level `x-pack` directory contain a license header that differs from the rest of the
repository: this directory contains commercial code that is governed by a separate license.
It can be helpful to have the IDE automatically insert the appropriate license header
depending on which part of the project a contribution is made to.
#### IntelliJ: Copyright & Scope Profiles
To have IntelliJ insert the correct license, it is necessary to create two copyright profiles.
These could, for example, be called `apache2` and `commercial`. They can be created in
`Preferences/Settings->Editor->Copyright->Copyright Profiles`. To associate these profiles to
their respective directories, two "Scopes" will need to be created. These can be created in
`Preferences/Settings->Appearance & Behavior->Scopes`. When creating scopes, be sure to choose
the `shared` scope type. Create a scope, `apache2`, with
the associated pattern of `!file[group:x-pack]:*/`. This pattern will exclude all the files contained in
the `x-pack` directory. The other scope, `commercial`, will have the inverse pattern of `file[group:x-pack]:*/`.
The two scopes, together, should account for all the files in the project. To associate the scopes
with their copyright profiles, go into `Preferences/Settings->Editor->Copyright` and use the `+` to add
the associations `apache2/apache2` and `commercial/commercial`.
Configuring these options in IntelliJ can be quite buggy, so do not be alarmed if you have to open/close
the settings window and/or restart IntelliJ to see your changes take effect.
### Creating A Distribution
To create a distribution from the source, simply run:
```sh
@@ -169,6 +201,8 @@ The archive distributions (tar and zip) can be found under:
`./distribution/archives/(tar|zip)/build/distributions/`
### Running The Full Test Suite
Before submitting your changes, run the test suite to make sure that nothing is broken, with:
```sh


@@ -37,8 +37,15 @@ class VagrantTestPlugin implements Plugin<Project> {
'ubuntu-1404',
]
/** All onboarded archives by default, available for Bats tests even if not used **/
static List<String> DISTRIBUTION_ARCHIVES = ['tar', 'rpm', 'deb', 'oss-rpm', 'oss-deb']
/** All distributions to bring into test VM, whether or not they are used **/
static List<String> DISTRIBUTIONS = [
'archives:tar',
'archives:oss-tar',
'packages:rpm',
'packages:oss-rpm',
'packages:deb',
'packages:oss-deb'
]
/** Packages onboarded for upgrade tests **/
static List<String> UPGRADE_FROM_ARCHIVES = ['rpm', 'deb']
@@ -117,13 +124,8 @@ class VagrantTestPlugin implements Plugin<Project> {
upgradeFromVersion = Version.fromString(upgradeFromVersionRaw)
}
DISTRIBUTION_ARCHIVES.each {
DISTRIBUTIONS.each {
// Adds a dependency for the current version
if (it == 'tar') {
it = 'archives:tar'
} else {
it = "packages:${it}"
}
project.dependencies.add(PACKAGING_CONFIGURATION,
project.dependencies.project(path: ":distribution:${it}", configuration: 'default'))
}


@@ -535,7 +535,6 @@
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReplicaShardAllocatorTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReusePeerRecoverySharedTest.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]get[/\\]GetActionIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexServiceTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexingSlowLogTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicySettingsTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SearchSlowLogTests.java" checks="LineLength" />


@@ -48,6 +48,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.index.IndexRequest;
@@ -75,6 +76,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.rest.action.RestFieldCapabilitiesAction;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
@@ -536,6 +538,16 @@ public final class Request {
return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null);
}
static Request fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest) {
Params params = Params.builder();
params.withFields(fieldCapabilitiesRequest.fields());
params.withIndicesOptions(fieldCapabilitiesRequest.indicesOptions());
String[] indices = fieldCapabilitiesRequest.indices();
String endpoint = endpoint(indices, "_field_caps");
return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), null);
}
static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException {
String endpoint = endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval");
Params params = Params.builder();
@@ -712,6 +724,13 @@ public final class Request {
return this;
}
Params withFields(String[] fields) {
if (fields != null && fields.length > 0) {
return putParam("fields", String.join(",", fields));
}
return this;
}
Params withMasterTimeout(TimeValue masterTimeout) {
return putParam("master_timeout", masterTimeout);
}


@@ -30,6 +30,8 @@ import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.MultiGetRequest;
@@ -501,6 +503,31 @@ public class RestHighLevelClient implements Closeable {
headers);
}
/**
* Executes a request using the Field Capabilities API.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html">Field Capabilities API
* on elastic.co</a>.
*/
public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest,
Header... headers) throws IOException {
return performRequestAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps,
FieldCapabilitiesResponse::fromXContent, emptySet(), headers);
}
/**
* Asynchronously executes a request using the Field Capabilities API.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html">Field Capabilities API
* on elastic.co</a>.
*/
public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest,
ActionListener<FieldCapabilitiesResponse> listener,
Header... headers) {
performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps,
FieldCapabilitiesResponse::fromXContent, listener, emptySet(), headers);
}
protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
CheckedFunction<XContentParser, Resp, IOException> entityParser,
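As a usage sketch (not from the commit): assuming an existing `RestHighLevelClient` named `client`, with hypothetical index and field names, the new synchronous method can be called like this:

```java
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
    .indices("posts")
    .fields("user", "title");
FieldCapabilitiesResponse response = client.fieldCaps(request);
// map from mapped type (e.g. "text", "keyword") to the merged capabilities of "user"
Map<String, FieldCapabilities> userCapabilities = response.getField("user");
```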


@@ -52,6 +52,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.index.IndexRequest;
@@ -89,6 +90,7 @@ import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.index.rankeval.RankEvalSpec;
import org.elasticsearch.index.rankeval.RatedRequest;
import org.elasticsearch.index.rankeval.RestRankEvalAction;
import org.elasticsearch.rest.action.RestFieldCapabilitiesAction;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
@@ -108,11 +110,14 @@ import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.StringJoiner;
import java.util.function.Consumer;
import java.util.function.Function;
@@ -128,6 +133,8 @@ import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAl
import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.nullValue;
public class RequestTests extends ESTestCase {
@@ -1213,6 +1220,47 @@ public class RequestTests extends ESTestCase {
}
}
public void testFieldCaps() {
// Create a random request.
String[] indices = randomIndicesNames(0, 5);
String[] fields = generateRandomStringArray(5, 10, false, false);
FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest()
.indices(indices)
.fields(fields);
Map<String, String> indicesOptionsParams = new HashMap<>();
setRandomIndicesOptions(fieldCapabilitiesRequest::indicesOptions,
fieldCapabilitiesRequest::indicesOptions,
indicesOptionsParams);
Request request = Request.fieldCaps(fieldCapabilitiesRequest);
// Verify that the resulting REST request looks as expected.
StringJoiner endpoint = new StringJoiner("/", "/", "");
String joinedIndices = String.join(",", indices);
if (!joinedIndices.isEmpty()) {
endpoint.add(joinedIndices);
}
endpoint.add("_field_caps");
assertEquals(endpoint.toString(), request.getEndpoint());
assertEquals(4, request.getParameters().size());
// Note that we don't check the field param value explicitly, as field names are passed through
// a hash set before being added to the request, and can appear in a non-deterministic order.
assertThat(request.getParameters(), hasKey("fields"));
String[] requestFields = Strings.splitStringByCommaToArray(request.getParameters().get("fields"));
assertEquals(new HashSet<>(Arrays.asList(fields)),
new HashSet<>(Arrays.asList(requestFields)));
for (Map.Entry<String, String> param : indicesOptionsParams.entrySet()) {
assertThat(request.getParameters(), hasEntry(param.getKey(), param.getValue()));
}
assertNull(request.getEntity());
}
public void testRankEval() throws Exception {
RankEvalSpec spec = new RankEvalSpec(
Collections.singletonList(new RatedRequest("queryId", Collections.emptyList(), new SearchSourceBuilder())),
@@ -1233,7 +1281,6 @@ public class RequestTests extends ESTestCase {
assertEquals(3, request.getParameters().size());
assertEquals(expectedParams, request.getParameters());
assertToXContentBody(spec, request.getEntity());
}
public void testSplit() throws IOException {


@@ -27,6 +27,9 @@ import org.apache.http.entity.StringEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.MultiSearchRequest;
@@ -96,14 +99,31 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
client().performRequest(HttpPut.METHOD_NAME, "/index/type/5", Collections.emptyMap(), doc5);
client().performRequest(HttpPost.METHOD_NAME, "/index/_refresh");
StringEntity doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
StringEntity doc = new StringEntity("{\"field\":\"value1\", \"rating\": 7}", ContentType.APPLICATION_JSON);
client().performRequest(HttpPut.METHOD_NAME, "/index1/doc/1", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
client().performRequest(HttpPut.METHOD_NAME, "/index1/doc/2", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
StringEntity mappings = new StringEntity(
"{" +
" \"mappings\": {" +
" \"doc\": {" +
" \"properties\": {" +
" \"rating\": {" +
" \"type\": \"keyword\"" +
" }" +
" }" +
" }" +
" }" +
"}}",
ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/index2", Collections.emptyMap(), mappings);
doc = new StringEntity("{\"field\":\"value1\", \"rating\": \"good\"}", ContentType.APPLICATION_JSON);
client().performRequest(HttpPut.METHOD_NAME, "/index2/doc/3", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
client().performRequest(HttpPut.METHOD_NAME, "/index2/doc/4", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON);
client().performRequest(HttpPut.METHOD_NAME, "/index3/doc/5", Collections.emptyMap(), doc);
doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON);
@@ -713,6 +733,57 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertThat(multiSearchResponse.getResponses()[1].getResponse(), nullValue());
}
public void testFieldCaps() throws IOException {
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
.indices("index1", "index2")
.fields("rating", "field");
FieldCapabilitiesResponse response = execute(request,
highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync);
// Check the capabilities for the 'rating' field.
assertTrue(response.get().containsKey("rating"));
Map<String, FieldCapabilities> ratingResponse = response.getField("rating");
assertEquals(2, ratingResponse.size());
FieldCapabilities expectedKeywordCapabilities = new FieldCapabilities(
"rating", "keyword", true, true, new String[]{"index2"}, null, null);
assertEquals(expectedKeywordCapabilities, ratingResponse.get("keyword"));
FieldCapabilities expectedLongCapabilities = new FieldCapabilities(
"rating", "long", true, true, new String[]{"index1"}, null, null);
assertEquals(expectedLongCapabilities, ratingResponse.get("long"));
// Check the capabilities for the 'field' field.
assertTrue(response.get().containsKey("field"));
Map<String, FieldCapabilities> fieldResponse = response.getField("field");
assertEquals(1, fieldResponse.size());
FieldCapabilities expectedTextCapabilities = new FieldCapabilities(
"field", "text", true, false);
assertEquals(expectedTextCapabilities, fieldResponse.get("text"));
}
public void testFieldCapsWithNonExistentFields() throws IOException {
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
.indices("index2")
.fields("nonexistent");
FieldCapabilitiesResponse response = execute(request,
highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync);
assertTrue(response.get().isEmpty());
}
public void testFieldCapsWithNonExistentIndices() {
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
.indices("non-existent")
.fields("rating");
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
() -> execute(request, highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync));
assertEquals(RestStatus.NOT_FOUND, exception.status());
}
private static void assertSearchHeader(SearchResponse searchResponse) {
assertThat(searchResponse.getTook().nanos(), greaterThanOrEqualTo(0L));
assertEquals(0, searchResponse.getFailedShards());


@@ -21,8 +21,13 @@ package org.elasticsearch.client.documentation;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
@@ -93,6 +98,8 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
@@ -157,6 +164,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::search-source-setter
SearchRequest searchRequest = new SearchRequest();
searchRequest.indices("posts");
searchRequest.source(sourceBuilder);
// end::search-source-setter
@@ -699,6 +707,65 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
}
}
public void testFieldCaps() throws Exception {
indexSearchTestData();
RestHighLevelClient client = highLevelClient();
// tag::field-caps-request
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
.fields("user")
.indices("posts", "authors", "contributors");
// end::field-caps-request
// tag::field-caps-request-indicesOptions
request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
// end::field-caps-request-indicesOptions
// tag::field-caps-execute
FieldCapabilitiesResponse response = client.fieldCaps(request);
// end::field-caps-execute
// tag::field-caps-response
assertThat(response.get().keySet(), contains("user"));
Map<String, FieldCapabilities> userResponse = response.getField("user");
assertThat(userResponse.keySet(), containsInAnyOrder("keyword", "text")); // <1>
FieldCapabilities keywordCapabilities = userResponse.get("keyword");
assertTrue(keywordCapabilities.isSearchable());
assertFalse(keywordCapabilities.isAggregatable());
assertArrayEquals(keywordCapabilities.indices(), // <2>
new String[]{"authors", "contributors"});
assertNull(keywordCapabilities.nonSearchableIndices()); // <3>
assertArrayEquals(keywordCapabilities.nonAggregatableIndices(), // <4>
new String[]{"authors"});
// end::field-caps-response
// tag::field-caps-execute-listener
ActionListener<FieldCapabilitiesResponse> listener = new ActionListener<FieldCapabilitiesResponse>() {
@Override
public void onResponse(FieldCapabilitiesResponse response) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::field-caps-execute-listener
// Replace the empty listener by a blocking listener for tests.
CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::field-caps-execute-async
client.fieldCapsAsync(request, listener); // <1>
// end::field-caps-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
public void testRankEval() throws Exception {
indexSearchTestData();
RestHighLevelClient client = highLevelClient();
@@ -794,7 +861,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
MultiSearchResponse.Item firstResponse = response.getResponses()[0]; // <1>
assertNull(firstResponse.getFailure()); // <2>
SearchResponse searchResponse = firstResponse.getResponse(); // <3>
assertEquals(3, searchResponse.getHits().getTotalHits());
assertEquals(4, searchResponse.getHits().getTotalHits());
MultiSearchResponse.Item secondResponse = response.getResponses()[1]; // <4>
assertNull(secondResponse.getFailure());
searchResponse = secondResponse.getResponse();
@@ -840,18 +907,35 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
}
private void indexSearchTestData() throws IOException {
BulkRequest request = new BulkRequest();
request.add(new IndexRequest("posts", "doc", "1")
CreateIndexRequest authorsRequest = new CreateIndexRequest("authors")
.mapping("doc", "user", "type=keyword,doc_values=false");
CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest);
assertTrue(authorsResponse.isAcknowledged());
CreateIndexRequest contributorsRequest = new CreateIndexRequest("contributors")
.mapping("doc", "user", "type=keyword");
CreateIndexResponse contributorsResponse = highLevelClient().indices().create(contributorsRequest);
assertTrue(contributorsResponse.isAcknowledged());
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new IndexRequest("posts", "doc", "1")
.source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user",
Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value")));
request.add(new IndexRequest("posts", "doc", "2")
bulkRequest.add(new IndexRequest("posts", "doc", "2")
.source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user",
Arrays.asList("kimchy", "christoph"), "innerObject", Collections.singletonMap("key", "value")));
request.add(new IndexRequest("posts", "doc", "3")
bulkRequest.add(new IndexRequest("posts", "doc", "3")
.source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user",
Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value")));
request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
BulkResponse bulkResponse = highLevelClient().bulk(request);
bulkRequest.add(new IndexRequest("authors", "doc", "1")
.source(XContentType.JSON, "user", "kimchy"));
bulkRequest.add(new IndexRequest("contributors", "doc", "1")
.source(XContentType.JSON, "user", "tanguy"));
bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest);
assertSame(RestStatus.OK, bulkResponse.status());
assertFalse(bulkResponse.hasFailures());
}


@@ -270,7 +270,7 @@ Closure commonDebConfig(boolean oss) {
customFields['License'] = 'Elastic-License'
}
version = project.version
version = project.version.replace('-', '~')
packageGroup 'web'
requires 'bash'
requires 'libc6'


@@ -0,0 +1,82 @@
[[java-rest-high-field-caps]]
=== Field Capabilities API
The field capabilities API allows for retrieving the capabilities of fields across multiple indices.
[[java-rest-high-field-caps-request]]
==== Field Capabilities Request
A `FieldCapabilitiesRequest` contains a list of fields for which capabilities
should be returned, plus an optional list of target indices. If no indices
are provided, the request is executed on all indices.
Note that the `fields` parameter supports wildcard notation. For example, providing `text_*`
will cause all fields that match the expression to be returned.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-request]
--------------------------------------------------
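In addition to the tagged snippet above, a wildcard request could be sketched as follows (the index name is hypothetical):

["source","java"]
--------------------------------------------------
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
    .indices("posts")
    .fields("text_*"); // capabilities for every field whose name starts with "text_"
--------------------------------------------------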
[[java-rest-high-field-caps-request-optional]]
===== Optional arguments
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-request-indicesOptions]
--------------------------------------------------
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
how wildcard expressions are expanded.
[[java-rest-high-field-caps-sync]]
==== Synchronous Execution
The `fieldCaps` method executes the request synchronously:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute]
--------------------------------------------------
[[java-rest-high-field-caps-async]]
==== Asynchronous Execution
The `fieldCapsAsync` method executes the request asynchronously,
calling the provided `ActionListener` when the response is ready:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute-async]
--------------------------------------------------
<1> The `FieldCapabilitiesRequest` to execute and the `ActionListener` to use when
the execution completes.
The asynchronous method does not block and returns immediately. Once the request
completes, the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.
A typical listener for `FieldCapabilitiesResponse` is constructed as follows:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed.
<2> Called when the whole `FieldCapabilitiesRequest` fails.
[[java-rest-high-field-caps-response]]
==== FieldCapabilitiesResponse
For each requested field, the returned `FieldCapabilitiesResponse` contains its type
and whether or not it can be searched or aggregated on. The response also gives
information about how each index contributes to the field's capabilities.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-response]
--------------------------------------------------
<1> The `user` field has two possible types, `keyword` and `text`.
<2> This field only has type `keyword` in the `authors` and `contributors` indices.
<3> Null, since the field is searchable in all indices for which it has the `keyword` type.
<4> The `user` field is not aggregatable in the `authors` index.
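As an additional sketch (not part of the tagged tests above), the whole response map can be traversed as follows, assuming a `FieldCapabilitiesResponse` named `response`:

["source","java"]
--------------------------------------------------
for (Map.Entry<String, Map<String, FieldCapabilities>> field : response.get().entrySet()) {
    field.getValue().forEach((type, caps) ->
        System.out.println(field.getKey() + " as " + type
            + ": searchable=" + caps.isSearchable()
            + ", aggregatable=" + caps.isAggregatable()));
}
--------------------------------------------------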


@@ -32,11 +32,13 @@ The Java High Level REST Client supports the following Search APIs:
* <<java-rest-high-search-scroll>>
* <<java-rest-high-clear-scroll>>
* <<java-rest-high-multi-search>>
* <<java-rest-high-field-caps>>
* <<java-rest-high-rank-eval>>
include::search/search.asciidoc[]
include::search/scroll.asciidoc[]
include::search/multi-search.asciidoc[]
include::search/field-caps.asciidoc[]
include::search/rank-eval.asciidoc[]
== Miscellaneous APIs


@@ -27,11 +27,13 @@ POST /sales/_search?size=0
// CONSOLE
// TEST[setup:sales]
Available expressions for interval: `year`, `quarter`, `month`, `week`, `day`, `hour`, `minute`, `second`
Available expressions for interval: `year` (`1y`), `quarter` (`1q`), `month` (`1M`), `week` (`1w`),
`day` (`1d`), `hour` (`1h`), `minute` (`1m`), `second` (`1s`)
Time values can also be specified via abbreviations supported by <<time-units,time units>> parsing.
Note that fractional time values are not supported, but you can address this by shifting to another
time unit (e.g., `1.5h` could instead be specified as `90m`).
time unit (e.g., `1.5h` could instead be specified as `90m`). Also note that time intervals larger
than days do not support arbitrary values but can only be one unit large (e.g. `1y` is valid, `2y` is not).
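For illustration, the `90m` workaround expressed through the Java API might look like the following sketch (the aggregation and field names are hypothetical):

["source","java"]
--------------------------------------------------
// 1.5h is not a supported interval; use the equivalent 90m instead
DateHistogramAggregationBuilder histogram = AggregationBuilders
    .dateHistogram("sales_over_time")
    .field("date")
    .dateHistogramInterval(DateHistogramInterval.minutes(90));
--------------------------------------------------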
[source,js]
--------------------------------------------------


@@ -39,11 +39,14 @@ The result of the above delete operation is:
[[delete-versioning]]
=== Versioning
Each document indexed is versioned. When deleting a document, the
`version` can be specified to make sure the relevant document we are
trying to delete is actually being deleted and it has not changed in the
meantime. Every write operation executed on a document, deletes included,
causes its version to be incremented.
Each document indexed is versioned. When deleting a document, the `version` can
be specified to make sure the relevant document we are trying to delete is
actually being deleted and it has not changed in the meantime. Every write
operation executed on a document, deletes included, causes its version to be
incremented. The version number of a deleted document remains available for a
short time after deletion to allow for control of concurrent operations. The
length of time for which a deleted document's version remains available is
determined by the `index.gc_deletes` index setting and defaults to 60 seconds.
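For illustration, a versioned delete issued through the Java high-level REST client could be sketched as follows (index, type, id and version are hypothetical, and `client` is an existing `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
DeleteRequest request = new DeleteRequest("posts", "doc", "1").version(2);
// the delete only succeeds if version 2 is still the current version;
// otherwise a version conflict error is returned
DeleteResponse response = client.delete(request);
--------------------------------------------------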
[float]
[[delete-routing]]


@@ -214,6 +214,27 @@ specific index module:
The maximum length of regex that can be used in Regexp Query.
Defaults to `1000`.
`index.routing.allocation.enable`::
Controls shard allocation for this index. It can be set to:
* `all` (default) - Allows shard allocation for all shards.
* `primaries` - Allows shard allocation only for primary shards.
* `new_primaries` - Allows shard allocation only for newly-created primary shards.
* `none` - No shard allocation is allowed.
`index.routing.rebalance.enable`::
Enables shard rebalancing for this index. It can be set to:
* `all` (default) - Allows shard rebalancing for all shards.
* `primaries` - Allows shard rebalancing only for primary shards.
* `replicas` - Allows shard rebalancing only for replica shards.
* `none` - No shard rebalancing is allowed.
`index.gc_deletes`::
The length of time that a <<delete-versioning,deleted document's version number>> remains available for <<index-versioning,further versioned operations>>.
Defaults to `60s`.
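For illustration, these dynamic settings could be updated from Java with a sketch like the following (the index name is hypothetical; execute the request through whichever client you use):

["source","java"]
--------------------------------------------------
UpdateSettingsRequest request = new UpdateSettingsRequest("my-index")
    .settings(Settings.builder()
        .put("index.routing.allocation.enable", "primaries")
        .put("index.gc_deletes", "120s")
        .build());
--------------------------------------------------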
[float]
=== Settings in other index modules


@@ -8,4 +8,7 @@ The changes listed below have been released for the first time in Elasticsearch
=== Breaking changes
Core::
* Tribe node has been removed in favor of Cross-Cluster-Search
Rest API::
* The Clear Cache API only supports `POST` as HTTP method


@@ -78,9 +78,9 @@ returned with each batch of results. Each call to the `scroll` API returns the
next batch of results until there are no more results left to return, i.e. the
`hits` array is empty.
IMPORTANT: The initial search request and each subsequent scroll request
returns a new `_scroll_id` -- only the most recent `_scroll_id` should be
used.
IMPORTANT: The initial search request and each subsequent scroll request each
return a `_scroll_id`, which may change with each request -- only the most
recent `_scroll_id` should be used.
NOTE: If the request specifies aggregations, only the initial search response
will contain the aggregations results.
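A minimal sketch of this rule using the Java high-level REST client (assuming an existing `RestHighLevelClient` named `client` and an initial `SearchResponse` named `searchResponse`):

["source","java"]
--------------------------------------------------
String scrollId = searchResponse.getScrollId();
SearchHit[] hits = searchResponse.getHits().getHits();
while (hits != null && hits.length > 0) {
    SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId);
    scrollRequest.scroll(TimeValue.timeValueMinutes(1L));
    SearchResponse scrollResponse = client.searchScroll(scrollRequest);
    scrollId = scrollResponse.getScrollId(); // always use the most recent _scroll_id
    hits = scrollResponse.getHits().getHits();
}
--------------------------------------------------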


@@ -128,7 +128,7 @@ public class RenameProcessorTests extends ESTestCase {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
String fieldName = RandomDocumentPicks.randomFieldName(random());
ingestDocument.setFieldValue(fieldName, null);
String newFieldName = RandomDocumentPicks.randomFieldName(random());
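// pick a replacement field name that is guaranteed not to exist in the document, so the rename cannot collide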
String newFieldName = randomValueOtherThanMany(ingestDocument::hasField, () -> RandomDocumentPicks.randomFieldName(random()));
Processor processor = new RenameProcessor(randomAlphaOfLength(10), fieldName, newFieldName, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.hasField(fieldName), equalTo(false));


@@ -55,7 +55,8 @@ setup() {
}
@test "[TAR] archive is available" {
count=$(find . -type f -name 'elasticsearch*.tar.gz' | wc -l)
local version=$(cat version)
count=$(find . -type f -name "${PACKAGE_NAME}-${version}.tar.gz" | wc -l)
[ "$count" -eq 1 ]
}


@@ -35,10 +35,12 @@
install_archive() {
export ESHOME=${1:-/tmp/elasticsearch}
local version=$(cat version)
echo "Unpacking tarball to $ESHOME"
rm -rf /tmp/untar
mkdir -p /tmp/untar
tar -xzpf elasticsearch*.tar.gz -C /tmp/untar
tar -xzpf "${PACKAGE_NAME}-${version}.tar.gz" -C /tmp/untar
find /tmp/untar -depth -type d -name 'elasticsearch*' -exec mv {} "$ESHOME" \; > /dev/null
@@ -79,6 +81,8 @@ export_elasticsearch_paths() {
export ESSCRIPTS="$ESCONFIG/scripts"
export ESDATA="$ESHOME/data"
export ESLOG="$ESHOME/logs"
export PACKAGE_NAME=${PACKAGE_NAME:-"elasticsearch-oss"}
}
# Checks that all directories & files are correctly installed


@@ -1,7 +1,7 @@
{
"indices.clear_cache": {
"documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html",
"methods": ["POST", "GET"],
"methods": ["POST"],
"url": {
"path": "/_cache/clear",
"paths": ["/_cache/clear", "/{index}/_cache/clear"],


@@ -19,11 +19,14 @@
package org.elasticsearch.action.fieldcaps;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.ArrayList;
@@ -36,6 +39,13 @@ import java.util.List;
* Describes the capabilities of a field optionally merged across multiple indices.
*/
public class FieldCapabilities implements Writeable, ToXContentObject {
private static final ParseField TYPE_FIELD = new ParseField("type");
private static final ParseField SEARCHABLE_FIELD = new ParseField("searchable");
private static final ParseField AGGREGATABLE_FIELD = new ParseField("aggregatable");
private static final ParseField INDICES_FIELD = new ParseField("indices");
private static final ParseField NON_SEARCHABLE_INDICES_FIELD = new ParseField("non_searchable_indices");
private static final ParseField NON_AGGREGATABLE_INDICES_FIELD = new ParseField("non_aggregatable_indices");
private final String name;
private final String type;
private final boolean isSearchable;
@@ -52,7 +62,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject {
* @param isSearchable Whether this field is indexed for search.
* @param isAggregatable Whether this field can be aggregated on.
*/
FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) {
public FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) {
this(name, type, isSearchable, isAggregatable, null, null, null);
}
@@ -69,7 +79,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject {
* @param nonAggregatableIndices The list of indices where this field is not aggregatable,
* or null if the field is aggregatable in all indices.
*/
FieldCapabilities(String name, String type,
public FieldCapabilities(String name, String type,
boolean isSearchable, boolean isAggregatable,
String[] indices,
String[] nonSearchableIndices,
@@ -83,7 +93,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject {
this.nonAggregatableIndices = nonAggregatableIndices;
}
FieldCapabilities(StreamInput in) throws IOException {
public FieldCapabilities(StreamInput in) throws IOException {
this.name = in.readString();
this.type = in.readString();
this.isSearchable = in.readBoolean();
@@ -107,22 +117,47 @@ public class FieldCapabilities implements Writeable, ToXContentObject {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("type", type);
builder.field("searchable", isSearchable);
builder.field("aggregatable", isAggregatable);
builder.field(TYPE_FIELD.getPreferredName(), type);
builder.field(SEARCHABLE_FIELD.getPreferredName(), isSearchable);
builder.field(AGGREGATABLE_FIELD.getPreferredName(), isAggregatable);
if (indices != null) {
builder.field("indices", indices);
builder.field(INDICES_FIELD.getPreferredName(), indices);
}
if (nonSearchableIndices != null) {
builder.field("non_searchable_indices", nonSearchableIndices);
builder.field(NON_SEARCHABLE_INDICES_FIELD.getPreferredName(), nonSearchableIndices);
}
if (nonAggregatableIndices != null) {
builder.field("non_aggregatable_indices", nonAggregatableIndices);
builder.field(NON_AGGREGATABLE_INDICES_FIELD.getPreferredName(), nonAggregatableIndices);
}
builder.endObject();
return builder;
}
public static FieldCapabilities fromXContent(String name, XContentParser parser) throws IOException {
return PARSER.parse(parser, name);
}
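// Lenient object parser (unknown fields are ignored); the parse context supplies the field name.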
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<FieldCapabilities, String> PARSER = new ConstructingObjectParser<>(
"field_capabilities",
true,
(a, name) -> new FieldCapabilities(name,
(String) a[0],
(boolean) a[1],
(boolean) a[2],
a[3] != null ? ((List<String>) a[3]).toArray(new String[0]) : null,
a[4] != null ? ((List<String>) a[4]).toArray(new String[0]) : null,
a[5] != null ? ((List<String>) a[5]).toArray(new String[0]) : null));
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), TYPE_FIELD);
PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SEARCHABLE_FIELD);
PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), AGGREGATABLE_FIELD);
PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), INDICES_FIELD);
PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_SEARCHABLE_INDICES_FIELD);
PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_AGGREGATABLE_INDICES_FIELD);
}
/**
* The name of the field.
*/


@@ -61,14 +61,18 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
/**
* Returns <code>true</code> iff the results should be merged.
*
* Note that when using the high-level REST client, results are always merged (this flag is always considered 'true').
*/
boolean isMergeResults() {
return mergeResults;
}
/**
* if set to <code>true</code> the response will contain only a merged view of the per index field capabilities. Otherwise only
* unmerged per index field capabilities are returned.
* If set to <code>true</code> the response will contain only a merged view of the per index field capabilities.
* Otherwise only unmerged per index field capabilities are returned.
*
* Note that when using the high-level REST client, results are always merged (this flag is always considered 'true').
*/
void setMergeResults(boolean mergeResults) {
this.mergeResults = mergeResults;
@@ -158,17 +162,17 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
if (o == null || getClass() != o.getClass()) return false;
FieldCapabilitiesRequest that = (FieldCapabilitiesRequest) o;
if (!Arrays.equals(indices, that.indices)) return false;
if (!indicesOptions.equals(that.indicesOptions)) return false;
return Arrays.equals(fields, that.fields);
return Arrays.equals(indices, that.indices) &&
Objects.equals(indicesOptions, that.indicesOptions) &&
Arrays.equals(fields, that.fields) &&
Objects.equals(mergeResults, that.mergeResults);
}
@Override
public int hashCode() {
int result = Arrays.hashCode(indices);
result = 31 * result + indicesOptions.hashCode();
result = 31 * result + Arrays.hashCode(fields);
return result;
return Objects.hash(Arrays.hashCode(indices),
indicesOptions,
Arrays.hashCode(fields),
mergeResults);
}
}


@@ -21,20 +21,29 @@ package org.elasticsearch.action.fieldcaps;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParserUtils;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Response for {@link FieldCapabilitiesRequest} requests.
*/
public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentFragment {
private static final ParseField FIELDS_FIELD = new ParseField("fields");
private Map<String, Map<String, FieldCapabilities>> responseMap;
private List<FieldCapabilitiesIndexResponse> indexResponses;
@@ -114,10 +123,42 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("fields", responseMap);
builder.field(FIELDS_FIELD.getPreferredName(), responseMap);
return builder;
}
public static FieldCapabilitiesResponse fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
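// Parses the top-level "fields" object into a map from field name to its per-type capabilities.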
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<FieldCapabilitiesResponse, Void> PARSER =
new ConstructingObjectParser<>("field_capabilities_response", true,
a -> new FieldCapabilitiesResponse(
((List<Tuple<String, Map<String, FieldCapabilities>>>) a[0]).stream()
.collect(Collectors.toMap(Tuple::v1, Tuple::v2))));
static {
PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> {
Map<String, FieldCapabilities> typeToCapabilities = parseTypeToCapabilities(p, n);
return new Tuple<>(n, typeToCapabilities);
}, FIELDS_FIELD);
}
private static Map<String, FieldCapabilities> parseTypeToCapabilities(XContentParser parser, String name) throws IOException {
Map<String, FieldCapabilities> typeToCapabilities = new HashMap<>();
XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
String type = parser.currentName();
FieldCapabilities capabilities = FieldCapabilities.fromXContent(name, parser);
typeToCapabilities.put(type, capabilities);
}
return typeToCapabilities;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;


@@ -37,8 +37,10 @@ import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.transport.Transport;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
@@ -62,6 +64,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
private final long clusterStateVersion;
private final Map<String, AliasFilter> aliasFilter;
private final Map<String, Float> concreteIndexBoosts;
private final Map<String, Set<String>> indexRoutings;
private final SetOnce<AtomicArray<ShardSearchFailure>> shardFailures = new SetOnce<>();
private final Object shardFailuresMutex = new Object();
private final AtomicInteger successfulOps = new AtomicInteger();
@@ -72,6 +75,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportService searchTransportService,
BiFunction<String, String, Transport.Connection> nodeIdToConnection,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
Map<String, Set<String>> indexRoutings,
Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator<SearchShardIterator> shardsIts,
TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion,
@@ -89,6 +93,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
this.clusterStateVersion = clusterStateVersion;
this.concreteIndexBoosts = concreteIndexBoosts;
this.aliasFilter = aliasFilter;
this.indexRoutings = indexRoutings;
this.results = resultConsumer;
this.clusters = clusters;
}
@@ -128,17 +133,17 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
onPhaseFailure(currentPhase, "all shards failed", cause);
} else {
Boolean allowPartialResults = request.allowPartialSearchResults();
assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults";
if (allowPartialResults == false && shardFailures.get() != null ){
if (logger.isDebugEnabled()) {
final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures());
Throwable cause = shardSearchFailures.length == 0 ? null :
ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
logger.debug(() -> new ParameterizedMessage("{} shards failed for phase: [{}]",
shardSearchFailures.length, getName()), cause);
}
onPhaseFailure(currentPhase, "Partial shards failure", null);
} else {
if (logger.isTraceEnabled()) {
final String resultsFrom = results.getSuccessfulResults()
.map(r -> r.getSearchShardTarget().toString()).collect(Collectors.joining(","));
@@ -271,14 +276,14 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
@Override
public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) {
ShardSearchFailure[] failures = buildShardFailures();
Boolean allowPartialResults = request.allowPartialSearchResults();
assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults";
if (allowPartialResults == false && failures.length > 0){
raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures));
}
return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(),
skippedOps.get(), buildTookInMillis(), failures, clusters);
}
@@ -318,8 +323,11 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
AliasFilter filter = aliasFilter.get(shardIt.shardId().getIndex().getUUID());
assert filter != null;
float indexBoost = concreteIndexBoosts.getOrDefault(shardIt.shardId().getIndex().getUUID(), DEFAULT_INDEX_BOOST);
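// resolve any routing values registered for this shard's index (empty if the request specified none)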
String indexName = shardIt.shardId().getIndex().getName();
final String[] routings = indexRoutings.getOrDefault(indexName, Collections.emptySet())
.toArray(new String[0]);
return new ShardSearchTransportRequest(shardIt.getOriginalIndices(), request, shardIt.shardId(), getNumShards(),
filter, indexBoost, timeProvider.getAbsoluteStartMillis(), clusterAlias);
filter, indexBoost, timeProvider.getAbsoluteStartMillis(), clusterAlias, routings);
}
/**


@@ -27,6 +27,7 @@ import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.transport.Transport;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.function.BiFunction;
import java.util.function.Function;
@@ -47,6 +48,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc
CanMatchPreFilterSearchPhase(Logger logger, SearchTransportService searchTransportService,
BiFunction<String, String, Transport.Connection> nodeIdToConnection,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
Map<String, Set<String>> indexRoutings,
Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator<SearchShardIterator> shardsIts,
TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion,
@@ -56,9 +58,9 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc
* We set max concurrent shard requests to the number of shards to avoid the deep recursion that would occur if the local node
* is the coordinating node for the query, holds all the shards for the request, and there are a lot of shards.
*/
super("can_match", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request,
listener, shardsIts, timeProvider, clusterStateVersion, task, new BitSetSearchPhaseResults(shardsIts.size()), shardsIts.size(),
clusters);
super("can_match", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, indexRoutings,
executor, request, listener, shardsIts, timeProvider, clusterStateVersion, task,
new BitSetSearchPhaseResults(shardsIts.size()), shardsIts.size(), clusters);
this.phaseFactory = phaseFactory;
this.shardsIts = shardsIts;
}


@@ -131,7 +131,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
if (shardsIts.size() > 0) {
int maxConcurrentShardRequests = Math.min(this.maxConcurrentShardRequests, shardsIts.size());
final boolean success = shardExecutionIndex.compareAndSet(0, maxConcurrentShardRequests);
assert success;
assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults";
if (request.allowPartialSearchResults() == false) {
final StringBuilder missingShards = new StringBuilder();
@@ -140,7 +140,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
final SearchShardIterator shardRoutings = shardsIts.get(index);
if (shardRoutings.size() == 0) {
if(missingShards.length() >0 ){
missingShards.append(", ");
}
missingShards.append(shardRoutings.shardId());
}


@@ -28,6 +28,7 @@ import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.transport.Transport;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.function.BiFunction;
@@ -37,11 +38,13 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
SearchDfsQueryThenFetchAsyncAction(final Logger logger, final SearchTransportService searchTransportService,
final BiFunction<String, String, Transport.Connection> nodeIdToConnection, final Map<String, AliasFilter> aliasFilter,
final Map<String, Float> concreteIndexBoosts, final SearchPhaseController searchPhaseController, final Executor executor,
final Map<String, Float> concreteIndexBoosts, final Map<String, Set<String>> indexRoutings,
final SearchPhaseController searchPhaseController, final Executor executor,
final SearchRequest request, final ActionListener<SearchResponse> listener,
final GroupShardsIterator<SearchShardIterator> shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider,
final long clusterStateVersion, final SearchTask task, SearchResponse.Clusters clusters) {
super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener,
super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, indexRoutings,
executor, request, listener,
shardsIts, timeProvider, clusterStateVersion, task, new ArraySearchPhaseResults<>(shardsIts.size()),
request.getMaxConcurrentShardRequests(), clusters);
this.searchPhaseController = searchPhaseController;


@@ -28,6 +28,7 @@ import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.transport.Transport;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.function.BiFunction;
@@ -37,13 +38,14 @@ final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<Se
SearchQueryThenFetchAsyncAction(final Logger logger, final SearchTransportService searchTransportService,
final BiFunction<String, String, Transport.Connection> nodeIdToConnection, final Map<String, AliasFilter> aliasFilter,
final Map<String, Float> concreteIndexBoosts, final SearchPhaseController searchPhaseController, final Executor executor,
final Map<String, Float> concreteIndexBoosts, final Map<String, Set<String>> indexRoutings,
final SearchPhaseController searchPhaseController, final Executor executor,
final SearchRequest request, final ActionListener<SearchResponse> listener,
final GroupShardsIterator<SearchShardIterator> shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider,
long clusterStateVersion, SearchTask task, SearchResponse.Clusters clusters) {
super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener,
shardsIts, timeProvider, clusterStateVersion, task, searchPhaseController.newSearchPhaseResults(request, shardsIts.size()),
request.getMaxConcurrentShardRequests(), clusters);
super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, indexRoutings,
executor, request, listener, shardsIts, timeProvider, clusterStateVersion, task,
searchPhaseController.newSearchPhaseResults(request, shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters);
this.searchPhaseController = searchPhaseController;
}


@@ -297,6 +297,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
Map<String, AliasFilter> aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(),
searchRequest.indices());
routingMap = routingMap == null ? Collections.emptyMap() : Collections.unmodifiableMap(routingMap);
String[] concreteIndices = new String[indices.length];
for (int i = 0; i < indices.length; i++) {
concreteIndices[i] = indices[i].getName();
@ -350,7 +351,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
}
boolean preFilterSearchShards = shouldPreFilterSearchShards(searchRequest, shardIterators);
searchAsyncAction(task, searchRequest, shardIterators, timeProvider, connectionLookup, clusterState.version(),
Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener, preFilterSearchShards, clusters).start();
Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, routingMap, listener, preFilterSearchShards, clusters).start();
}
private boolean shouldPreFilterSearchShards(SearchRequest searchRequest, GroupShardsIterator<SearchShardIterator> shardIterators) {
@ -380,17 +381,20 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
GroupShardsIterator<SearchShardIterator> shardIterators,
SearchTimeProvider timeProvider,
BiFunction<String, String, Transport.Connection> connectionLookup,
long clusterStateVersion, Map<String, AliasFilter> aliasFilter,
long clusterStateVersion,
Map<String, AliasFilter> aliasFilter,
Map<String, Float> concreteIndexBoosts,
ActionListener<SearchResponse> listener, boolean preFilter,
Map<String, Set<String>> indexRoutings,
ActionListener<SearchResponse> listener,
boolean preFilter,
SearchResponse.Clusters clusters) {
Executor executor = threadPool.executor(ThreadPool.Names.SEARCH);
if (preFilter) {
return new CanMatchPreFilterSearchPhase(logger, searchTransportService, connectionLookup,
aliasFilter, concreteIndexBoosts, executor, searchRequest, listener, shardIterators,
aliasFilter, concreteIndexBoosts, indexRoutings, executor, searchRequest, listener, shardIterators,
timeProvider, clusterStateVersion, task, (iter) -> {
AbstractSearchAsyncAction action = searchAsyncAction(task, searchRequest, iter, timeProvider, connectionLookup,
clusterStateVersion, aliasFilter, concreteIndexBoosts, listener, false, clusters);
clusterStateVersion, aliasFilter, concreteIndexBoosts, indexRoutings, listener, false, clusters);
return new SearchPhase(action.getName()) {
@Override
public void run() throws IOException {
@ -403,14 +407,14 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
switch (searchRequest.searchType()) {
case DFS_QUERY_THEN_FETCH:
searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators,
timeProvider, clusterStateVersion, task, clusters);
aliasFilter, concreteIndexBoosts, indexRoutings, searchPhaseController, executor, searchRequest, listener,
shardIterators, timeProvider, clusterStateVersion, task, clusters);
break;
case QUERY_AND_FETCH:
case QUERY_THEN_FETCH:
searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators,
timeProvider, clusterStateVersion, task, clusters);
aliasFilter, concreteIndexBoosts, indexRoutings, searchPhaseController, executor, searchRequest, listener,
shardIterators, timeProvider, clusterStateVersion, task, clusters);
break;
default:
throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");
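The routing hunks above show the coordinating node resolving per-index routing values once and normalizing the result before handing it to every async search action. A minimal, self-contained sketch of that normalization; the class and index names are hypothetical:

import java.util.Collections;
import java.util.Map;
import java.util.Set;

// Hypothetical stand-in for the normalization in TransportSearchAction:
// a null result from routing resolution becomes an empty, immutable map.
final class RoutingMapSketch {
    static Map<String, Set<String>> normalize(Map<String, Set<String>> resolved) {
        return resolved == null ? Collections.emptyMap() : Collections.unmodifiableMap(resolved);
    }

    public static void main(String[] args) {
        Map<String, Set<String>> resolved =
            Collections.singletonMap("logs-2018", Collections.singleton("user42")); // hypothetical index/routing
        System.out.println(normalize(resolved)); // {logs-2018=[user42]}
        System.out.println(normalize(null));     // {}
    }
}

Making the map immutable here means the same instance can be shared safely by the pre-filter phase and the per-shard requests without defensive copies.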

View File

@ -24,7 +24,7 @@ import java.util.List;
/**
* A simple {@link ShardsIterator} that iterates a list or sub-list of
* {@link ShardRouting shard routings}.
*/
public class PlainShardsIterator implements ShardsIterator {

View File

@ -38,7 +38,7 @@ import java.util.List;
/**
* {@link ShardRouting} immutably encapsulates information about shard
* routings like id, state, version, etc.
*/
public final class ShardRouting implements Writeable, ToXContentObject {
@ -477,7 +477,7 @@ public final class ShardRouting implements Writeable, ToXContentObject {
"ShardRouting is a relocation target but current node id isn't equal to source relocating node. This [" + this + "], other [" + other + "]";
assert b == false || this.shardId.equals(other.shardId) :
"ShardRouting is a relocation target but both routings are not of the same shard id. This [" + this + "], other [" + other + "]";
assert b == false || this.primary == other.primary :
"ShardRouting is a relocation target but primary flag is different. This [" + this + "], target [" + other + "]";
@ -504,7 +504,7 @@ public final class ShardRouting implements Writeable, ToXContentObject {
"ShardRouting is a relocation source but relocating node isn't equal to other's current node. This [" + this + "], other [" + other + "]";
assert b == false || this.shardId.equals(other.shardId) :
"ShardRouting is a relocation source but both routings are not of the same shard. This [" + this + "], target [" + other + "]";
assert b == false || this.primary == other.primary :
"ShardRouting is a relocation source but primary flag is different. This [" + this + "], target [" + other + "]";

View File

@ -18,9 +18,7 @@
*/
package org.elasticsearch.index.query;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanQuery;
@ -86,6 +84,11 @@ public final class TermsSetQueryBuilder extends AbstractQueryBuilder<TermsSetQue
out.writeOptionalWriteable(minimumShouldMatchScript);
}
// package-private for testing purposes
String getFieldName() {
return fieldName;
}
public List<?> getValues() {
return values;
}
@ -116,9 +119,10 @@ public final class TermsSetQueryBuilder extends AbstractQueryBuilder<TermsSetQue
@Override
protected boolean doEquals(TermsSetQueryBuilder other) {
return Objects.equals(fieldName, this.fieldName) && Objects.equals(values, this.values) &&
Objects.equals(minimumShouldMatchField, this.minimumShouldMatchField) &&
Objects.equals(minimumShouldMatchScript, this.minimumShouldMatchScript);
return Objects.equals(fieldName, other.fieldName)
&& Objects.equals(values, other.values)
&& Objects.equals(minimumShouldMatchField, other.minimumShouldMatchField)
&& Objects.equals(minimumShouldMatchScript, other.minimumShouldMatchScript);
}
@Override
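The doEquals change above fixes a classic copy-paste bug: every comparison read the field from "this" on both sides, so two builders with different values still compared equal. A framework-free sketch of the broken and fixed patterns, with hypothetical names:

import java.util.Objects;

// Hypothetical builder with one field; brokenEquals mirrors the old doEquals,
// fixedEquals mirrors the corrected one.
final class EqualsBugSketch {
    private final String fieldName;

    EqualsBugSketch(String fieldName) {
        this.fieldName = fieldName;
    }

    boolean brokenEquals(EqualsBugSketch other) {
        return Objects.equals(fieldName, this.fieldName); // compares this to this: always true
    }

    boolean fixedEquals(EqualsBugSketch other) {
        return Objects.equals(fieldName, other.fieldName); // compares this to other
    }

    public static void main(String[] args) {
        EqualsBugSketch a = new EqualsBugSketch("a");
        EqualsBugSketch b = new EqualsBugSketch("b");
        System.out.println(a.brokenEquals(b)); // true, despite different values
        System.out.println(a.fixedEquals(b));  // false
    }
}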

View File

@ -31,7 +31,6 @@ import org.elasticsearch.rest.action.RestToXContentListener;
import java.io.IOException;
import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.POST;
public class RestClearIndicesCacheAction extends BaseRestHandler {
@ -40,9 +39,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler {
super(settings);
controller.registerHandler(POST, "/_cache/clear", this);
controller.registerHandler(POST, "/{index}/_cache/clear", this);
controller.registerHandler(GET, "/_cache/clear", this);
controller.registerHandler(GET, "/{index}/_cache/clear", this);
}
@Override

View File

@ -25,8 +25,10 @@ import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Counter;
import org.elasticsearch.Version;
import org.elasticsearch.action.search.SearchTask;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.lucene.search.Queries;
@ -91,6 +93,7 @@ final class DefaultSearchContext extends SearchContext {
private final Engine.Searcher engineSearcher;
private final BigArrays bigArrays;
private final IndexShard indexShard;
private final ClusterService clusterService;
private final IndexService indexService;
private final ContextIndexSearcher searcher;
private final DfsSearchResult dfsResult;
@ -120,6 +123,7 @@ final class DefaultSearchContext extends SearchContext {
// filter for sliced scroll
private SliceBuilder sliceBuilder;
private SearchTask task;
private final Version minNodeVersion;
/**
@ -152,9 +156,10 @@ final class DefaultSearchContext extends SearchContext {
private final QueryShardContext queryShardContext;
private FetchPhase fetchPhase;
DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
IndexService indexService, IndexShard indexShard, BigArrays bigArrays, Counter timeEstimateCounter,
TimeValue timeout, FetchPhase fetchPhase, String clusterAlias) {
DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
Engine.Searcher engineSearcher, ClusterService clusterService, IndexService indexService,
IndexShard indexShard, BigArrays bigArrays, Counter timeEstimateCounter, TimeValue timeout,
FetchPhase fetchPhase, String clusterAlias, Version minNodeVersion) {
this.id = id;
this.request = request;
this.fetchPhase = fetchPhase;
@ -168,9 +173,11 @@ final class DefaultSearchContext extends SearchContext {
this.fetchResult = new FetchSearchResult(id, shardTarget);
this.indexShard = indexShard;
this.indexService = indexService;
this.clusterService = clusterService;
this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
this.timeEstimateCounter = timeEstimateCounter;
this.timeout = timeout;
this.minNodeVersion = minNodeVersion;
queryShardContext = indexService.newQueryShardContext(request.shardId().id(), searcher.getIndexReader(), request::nowInMillis,
clusterAlias);
queryShardContext.setTypes(request.types());
@ -278,8 +285,7 @@ final class DefaultSearchContext extends SearchContext {
}
if (sliceBuilder != null) {
filters.add(sliceBuilder.toFilter(queryShardContext, shardTarget().getShardId().getId(),
queryShardContext.getIndexSettings().getNumberOfShards()));
filters.add(sliceBuilder.toFilter(clusterService, request, queryShardContext, minNodeVersion));
}
if (filters.isEmpty()) {

View File

@ -616,8 +616,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
Engine.Searcher engineSearcher = indexShard.acquireSearcher("search");
final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget,
engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout, fetchPhase,
request.getClusterAlias());
engineSearcher, clusterService, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout,
fetchPhase, request.getClusterAlias(), clusterService.state().nodes().getMinNodeVersion());
boolean success = false;
try {
// we clone the query shard context here just for rewriting otherwise we

View File

@ -28,13 +28,10 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.io.IOException;
@ -61,7 +58,6 @@ import java.util.Optional;
*/
public class ShardSearchLocalRequest implements ShardSearchRequest {
private String clusterAlias;
private ShardId shardId;
private int numberOfShards;
@ -74,17 +70,18 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
private Boolean requestCache;
private long nowInMillis;
private boolean allowPartialSearchResults;
private String[] indexRoutings = Strings.EMPTY_ARRAY;
private String preference;
private boolean profile;
ShardSearchLocalRequest() {
}
ShardSearchLocalRequest(SearchRequest searchRequest, ShardId shardId, int numberOfShards,
AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias) {
AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias, String[] indexRoutings) {
this(shardId, numberOfShards, searchRequest.searchType(),
searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter, indexBoost,
searchRequest.allowPartialSearchResults());
searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter, indexBoost,
searchRequest.allowPartialSearchResults(), indexRoutings, searchRequest.preference());
// If allowPartialSearchResults is unset (i.e. null), the cluster-level default should have been substituted
// at this stage. Any NPEs in the above are therefore an error in request preparation logic.
assert searchRequest.allowPartialSearchResults() != null;
@ -102,7 +99,8 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
}
public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types,
Boolean requestCache, AliasFilter aliasFilter, float indexBoost, boolean allowPartialSearchResults) {
Boolean requestCache, AliasFilter aliasFilter, float indexBoost, boolean allowPartialSearchResults,
String[] indexRoutings, String preference) {
this.shardId = shardId;
this.numberOfShards = numberOfShards;
this.searchType = searchType;
@ -112,6 +110,8 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
this.aliasFilter = aliasFilter;
this.indexBoost = indexBoost;
this.allowPartialSearchResults = allowPartialSearchResults;
this.indexRoutings = indexRoutings;
this.preference = preference;
}
@ -169,18 +169,28 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
public Boolean requestCache() {
return requestCache;
}
@Override
public Boolean allowPartialSearchResults() {
return allowPartialSearchResults;
}
@Override
public Scroll scroll() {
return scroll;
}
@Override
public String[] indexRoutings() {
return indexRoutings;
}
@Override
public String preference() {
return preference;
}
@Override
public void setProfile(boolean profile) {
this.profile = profile;
@ -225,6 +235,13 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
allowPartialSearchResults = in.readOptionalBoolean();
}
if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
indexRoutings = in.readStringArray();
preference = in.readOptionalString();
} else {
indexRoutings = Strings.EMPTY_ARRAY;
preference = null;
}
}
protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException {
@ -240,7 +257,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
out.writeFloat(indexBoost);
}
if (!asKey) {
if (asKey == false) {
out.writeVLong(nowInMillis);
}
out.writeOptionalBoolean(requestCache);
@ -250,7 +267,12 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
out.writeOptionalBoolean(allowPartialSearchResults);
}
if (asKey == false) {
if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
out.writeStringArray(indexRoutings);
out.writeOptionalString(preference);
}
}
}
@Override
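The readFrom/innerWriteTo hunks above follow the usual version-gated wire pattern: fields introduced in 6.4.0 travel only between nodes that understand them, readers substitute defaults when the sender is older, and the new fields stay out of the request-cache key (the asKey == false branch). A hedged sketch of just that pattern, reusing only the stream and version calls visible in the diff; the holder class is hypothetical:

import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

// Hypothetical holder for the two 6.4.0 fields.
final class VersionGatedWireSketch {
    String[] indexRoutings = Strings.EMPTY_ARRAY;
    String preference;

    void readExtra(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
            indexRoutings = in.readStringArray();
            preference = in.readOptionalString();
        } else {
            indexRoutings = Strings.EMPTY_ARRAY; // defaults when the sender is older
            preference = null;
        }
    }

    void writeExtra(StreamOutput out, boolean asKey) throws IOException {
        if (asKey == false) { // routing and preference never become part of the cache key
            if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
                out.writeStringArray(indexRoutings);
                out.writeOptionalString(preference);
            }
        }
    }
}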

View File

@ -19,7 +19,9 @@
package org.elasticsearch.search.internal;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.CheckedFunction;
@ -28,8 +30,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.AliasFilterParsingException;
@ -68,11 +68,21 @@ public interface ShardSearchRequest {
long nowInMillis();
Boolean requestCache();
Boolean allowPartialSearchResults();
Scroll scroll();
/**
* Returns the routing values resolved by the coordinating node for the index pointed to by {@link #shardId()}.
*/
String[] indexRoutings();
/**
* Returns the preference from the original {@link SearchRequest#preference()}.
*/
String preference();
/**
* Sets if this shard search needs to be profiled or not
* @param profile True if the shard should be profiled

View File

@ -28,9 +28,6 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.Scroll;
@ -57,9 +54,10 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
}
public ShardSearchTransportRequest(OriginalIndices originalIndices, SearchRequest searchRequest, ShardId shardId, int numberOfShards,
AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias) {
AliasFilter aliasFilter, float indexBoost, long nowInMillis,
String clusterAlias, String[] indexRoutings) {
this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardId, numberOfShards, aliasFilter, indexBoost,
nowInMillis, clusterAlias);
nowInMillis, clusterAlias, indexRoutings);
this.originalIndices = originalIndices;
}
@ -151,17 +149,27 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
public Boolean requestCache() {
return shardSearchLocalRequest.requestCache();
}
@Override
public Boolean allowPartialSearchResults() {
return shardSearchLocalRequest.allowPartialSearchResults();
}
}
@Override
public Scroll scroll() {
return shardSearchLocalRequest.scroll();
}
@Override
public String[] indexRoutings() {
return shardSearchLocalRequest.indexRoutings();
}
@Override
public String preference() {
return shardSearchLocalRequest.preference();
}
@Override
public void readFrom(StreamInput in) throws IOException {
throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");

View File

@ -23,6 +23,10 @@ import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
@ -30,6 +34,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -39,9 +44,13 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.internal.ShardSearchRequest;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
/**
* A slice builder for splitting a scroll into multiple partitions.
@ -203,12 +212,49 @@ public class SliceBuilder implements Writeable, ToXContentObject {
return Objects.hash(this.field, this.id, this.max);
}
public Query toFilter(QueryShardContext context, int shardId, int numShards) {
/**
* Converts this slice definition into a Lucene {@link Query} filter.
*
* @param context Additional information needed to build the query
*/
public Query toFilter(ClusterService clusterService, ShardSearchRequest request, QueryShardContext context, Version minNodeVersion) {
final MappedFieldType type = context.fieldMapper(field);
if (type == null) {
throw new IllegalArgumentException("field " + field + " not found");
}
int shardId = request.shardId().id();
int numShards = context.getIndexSettings().getNumberOfShards();
if (minNodeVersion.onOrAfter(Version.V_6_4_0) &&
(request.preference() != null || request.indexRoutings().length > 0)) {
GroupShardsIterator<ShardIterator> group = buildShardIterator(clusterService, request);
assert group.size() <= numShards : "index routing shards: " + group.size() +
" cannot be greater than total number of shards: " + numShards;
if (group.size() < numShards) {
/**
* The routing of this request targets a subset of the shards of this index, so we need to retrieve
* the original {@link GroupShardsIterator} and compute the request shard id and number of
* shards from it.
* This behavior has been added in {@link Version#V_6_4_0} so if there is another node in the cluster
* with an older version we use the original shard id and number of shards in order to ensure that all
* slices use the same numbers.
*/
numShards = group.size();
int ord = 0;
shardId = -1;
// remap the original shard id with its index (position) in the sorted shard iterator.
for (ShardIterator it : group) {
assert it.shardId().getIndex().equals(request.shardId().getIndex());
if (request.shardId().equals(it.shardId())) {
shardId = ord;
break;
}
++ord;
}
assert shardId != -1 : "shard id: " + request.shardId().getId() + " not found in index shard routing";
}
}
String field = this.field;
boolean useTermQuery = false;
if ("_uid".equals(field)) {
@ -273,6 +319,17 @@ public class SliceBuilder implements Writeable, ToXContentObject {
return new MatchAllDocsQuery();
}
/**
* Returns the {@link GroupShardsIterator} for the provided <code>request</code>.
*/
private GroupShardsIterator<ShardIterator> buildShardIterator(ClusterService clusterService, ShardSearchRequest request) {
final ClusterState state = clusterService.state();
String[] indices = new String[] { request.shardId().getIndex().getName() };
Map<String, Set<String>> routingMap = request.indexRoutings().length > 0 ?
Collections.singletonMap(indices[0], Sets.newHashSet(request.indexRoutings())) : null;
return clusterService.operationRouting().searchShards(state, indices, routingMap, request.preference());
}
@Override
public String toString() {
return Strings.toString(this, true, true);
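The toFilter change above remaps a shard onto the routed subset: when routing or preference restricts the request to fewer shards than the index has, the effective shard id becomes the shard's position in the sorted iterator and the effective shard count becomes the subset size, so every slice partitions documents the same way on every node. A self-contained sketch of that remapping, assuming the routed shard ids are sorted as the GroupShardsIterator above is; all names are hypothetical:

import java.util.Arrays;

final class SliceRemapSketch {
    static int remap(int[] sortedRoutedShardIds, int originalShardId) {
        int ord = Arrays.binarySearch(sortedRoutedShardIds, originalShardId);
        if (ord < 0) {
            throw new IllegalStateException("shard " + originalShardId + " not targeted by routing");
        }
        return ord; // position in the routed subset, not the original shard id
    }

    public static void main(String[] args) {
        // Routing targets shards {1, 3} of a five-shard index: shard 3 slices
        // as shard 1 of 2, consistently across nodes and slices.
        int[] routed = {1, 3};
        System.out.println(remap(routed, 3) + " of " + routed.length); // 1 of 2
    }
}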

View File

@ -19,15 +19,20 @@
package org.elasticsearch.action.fieldcaps;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.common.util.ArrayUtils;
import org.elasticsearch.test.AbstractStreamableTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;
public class FieldCapabilitiesRequestTests extends ESTestCase {
private FieldCapabilitiesRequest randomRequest() {
public class FieldCapabilitiesRequestTests extends AbstractStreamableTestCase<FieldCapabilitiesRequest> {
@Override
protected FieldCapabilitiesRequest createTestInstance() {
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
int size = randomIntBetween(1, 20);
String[] randomFields = new String[size];
@ -48,49 +53,39 @@ public class FieldCapabilitiesRequestTests extends ESTestCase {
return request;
}
public void testEqualsAndHashcode() {
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
request.indices("foo");
request.indicesOptions(IndicesOptions.lenientExpandOpen());
request.fields("bar");
FieldCapabilitiesRequest other = new FieldCapabilitiesRequest();
other.indices("foo");
other.indicesOptions(IndicesOptions.lenientExpandOpen());
other.fields("bar");
assertEquals(request, request);
assertEquals(request, other);
assertEquals(request.hashCode(), other.hashCode());
// change indices
other.indices("foo", "bar");
assertNotEquals(request, other);
other.indices("foo");
assertEquals(request, other);
// change fields
other.fields("foo", "bar");
assertNotEquals(request, other);
other.fields("bar");
assertEquals(request, request);
// change indices options
other.indicesOptions(IndicesOptions.strictExpand());
assertNotEquals(request, other);
@Override
protected FieldCapabilitiesRequest createBlankInstance() {
return new FieldCapabilitiesRequest();
}
public void testFieldCapsRequestSerialization() throws IOException {
for (int i = 0; i < 20; i++) {
FieldCapabilitiesRequest request = randomRequest();
BytesStreamOutput output = new BytesStreamOutput();
request.writeTo(output);
output.flush();
StreamInput input = output.bytes().streamInput();
FieldCapabilitiesRequest deserialized = new FieldCapabilitiesRequest();
deserialized.readFrom(input);
assertEquals(deserialized, request);
assertEquals(deserialized.hashCode(), request.hashCode());
}
@Override
protected FieldCapabilitiesRequest mutateInstance(FieldCapabilitiesRequest instance) throws IOException {
List<Consumer<FieldCapabilitiesRequest>> mutators = new ArrayList<>();
mutators.add(request -> {
String[] fields = ArrayUtils.concat(request.fields(), new String[] {randomAlphaOfLength(10)});
request.fields(fields);
});
mutators.add(request -> {
String[] indices = ArrayUtils.concat(instance.indices(), generateRandomStringArray(5, 10, false, false));
request.indices(indices);
});
mutators.add(request -> {
IndicesOptions indicesOptions = randomValueOtherThan(request.indicesOptions(),
() -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
request.indicesOptions(indicesOptions);
});
mutators.add(request -> request.setMergeResults(!request.isMergeResults()));
FieldCapabilitiesRequest mutatedInstance = copyInstance(instance);
Consumer<FieldCapabilitiesRequest> mutator = randomFrom(mutators);
mutator.accept(mutatedInstance);
return mutatedInstance;
}
public void testValidation() {
FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
.indices("index2");
ActionRequestValidationException exception = request.validate();
assertNotNull(exception);
}
}
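The test now extends AbstractStreamableTestCase, which derives the wire round-trip and equals/hashCode checks from three hooks: createTestInstance, createBlankInstance and mutateInstance. The contract on each mutator is that the mutated copy must no longer equal the original. A framework-free sketch of that contract, with hypothetical names:

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Random;
import java.util.function.Consumer;

// Hypothetical request with one field, a copy constructor and value equality.
final class MutationContractSketch {
    static final class Req {
        String index;
        Req(String index) { this.index = index; }
        Req(Req other) { this.index = other.index; }
        @Override public boolean equals(Object o) {
            return o instanceof Req && Objects.equals(index, ((Req) o).index);
        }
        @Override public int hashCode() { return Objects.hash(index); }
    }

    public static void main(String[] args) {
        Req original = new Req("foo");
        List<Consumer<Req>> mutators = new ArrayList<>();
        mutators.add(r -> r.index = r.index + "-x");   // change the index
        mutators.add(r -> r.index = null);             // drop it entirely
        Req mutated = new Req(original);               // copyInstance in the real test
        mutators.get(new Random().nextInt(mutators.size())).accept(mutated); // randomFrom
        if (original.equals(mutated)) {
            throw new AssertionError("a mutator must break equality");
        }
        System.out.println("original=" + original.index + " mutated=" + mutated.index);
    }
}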

View File

@ -19,42 +19,152 @@
package org.elasticsearch.action.fieldcaps;
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.AbstractStreamableXContentTestCase;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;
public class FieldCapabilitiesResponseTests extends ESTestCase {
private FieldCapabilitiesResponse randomResponse() {
Map<String, Map<String, FieldCapabilities> > fieldMap = new HashMap<> ();
int numFields = randomInt(10);
for (int i = 0; i < numFields; i++) {
String fieldName = randomAlphaOfLengthBetween(5, 10);
int numIndices = randomIntBetween(1, 5);
Map<String, FieldCapabilities> indexFieldMap = new HashMap<> ();
for (int j = 0; j < numIndices; j++) {
String index = randomAlphaOfLengthBetween(10, 20);
indexFieldMap.put(index, FieldCapabilitiesTests.randomFieldCaps());
}
fieldMap.put(fieldName, indexFieldMap);
}
return new FieldCapabilitiesResponse(fieldMap);
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTestCase<FieldCapabilitiesResponse> {
@Override
protected FieldCapabilitiesResponse doParseInstance(XContentParser parser) throws IOException {
return FieldCapabilitiesResponse.fromXContent(parser);
}
public void testSerialization() throws IOException {
for (int i = 0; i < 20; i++) {
FieldCapabilitiesResponse response = randomResponse();
BytesStreamOutput output = new BytesStreamOutput();
response.writeTo(output);
output.flush();
StreamInput input = output.bytes().streamInput();
FieldCapabilitiesResponse deserialized = new FieldCapabilitiesResponse();
deserialized.readFrom(input);
assertEquals(deserialized, response);
assertEquals(deserialized.hashCode(), response.hashCode());
@Override
protected FieldCapabilitiesResponse createBlankInstance() {
return new FieldCapabilitiesResponse();
}
@Override
protected FieldCapabilitiesResponse createTestInstance() {
Map<String, Map<String, FieldCapabilities>> responses = new HashMap<>();
String[] fields = generateRandomStringArray(5, 10, false, true);
assertNotNull(fields);
for (String field : fields) {
Map<String, FieldCapabilities> typesToCapabilities = new HashMap<>();
String[] types = generateRandomStringArray(5, 10, false, false);
assertNotNull(types);
for (String type : types) {
typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field));
}
responses.put(field, typesToCapabilities);
}
return new FieldCapabilitiesResponse(responses);
}
@Override
protected FieldCapabilitiesResponse mutateInstance(FieldCapabilitiesResponse response) {
Map<String, Map<String, FieldCapabilities>> mutatedResponses = new HashMap<>(response.get());
int mutation = response.get().isEmpty() ? 0 : randomIntBetween(0, 2);
switch (mutation) {
case 0:
String toAdd = randomAlphaOfLength(10);
mutatedResponses.put(toAdd, Collections.singletonMap(
randomAlphaOfLength(10),
FieldCapabilitiesTests.randomFieldCaps(toAdd)));
break;
case 1:
String toRemove = randomFrom(mutatedResponses.keySet());
mutatedResponses.remove(toRemove);
break;
case 2:
String toReplace = randomFrom(mutatedResponses.keySet());
mutatedResponses.put(toReplace, Collections.singletonMap(
randomAlphaOfLength(10),
FieldCapabilitiesTests.randomFieldCaps(toReplace)));
break;
}
return new FieldCapabilitiesResponse(mutatedResponses);
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
// Disallow random fields from being inserted under the 'fields' key, as this
// map only contains field names, and also under 'fields.FIELD_NAME', as these
// maps only contain type names.
return field -> field.matches("fields(\\.\\w+)?");
}
public void testToXContent() throws IOException {
FieldCapabilitiesResponse response = createSimpleResponse();
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)
.startObject();
response.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
String generatedResponse = BytesReference.bytes(builder).utf8ToString();
assertEquals((
"{" +
" \"fields\": {" +
" \"rating\": { " +
" \"keyword\": {" +
" \"type\": \"keyword\"," +
" \"searchable\": false," +
" \"aggregatable\": true," +
" \"indices\": [\"index3\", \"index4\"]," +
" \"non_searchable_indices\": [\"index4\"] " +
" }," +
" \"long\": {" +
" \"type\": \"long\"," +
" \"searchable\": true," +
" \"aggregatable\": false," +
" \"indices\": [\"index1\", \"index2\"]," +
" \"non_aggregatable_indices\": [\"index1\"] " +
" }" +
" }," +
" \"title\": { " +
" \"text\": {" +
" \"type\": \"text\"," +
" \"searchable\": true," +
" \"aggregatable\": false" +
" }" +
" }" +
" }" +
"}").replaceAll("\\s+", ""), generatedResponse);
}
private static FieldCapabilitiesResponse createSimpleResponse() {
Map<String, FieldCapabilities> titleCapabilities = new HashMap<>();
titleCapabilities.put("text", new FieldCapabilities("title", "text", true, false));
Map<String, FieldCapabilities> ratingCapabilities = new HashMap<>();
ratingCapabilities.put("long", new FieldCapabilities("rating", "long",
true, false,
new String[]{"index1", "index2"},
null,
new String[]{"index1"}));
ratingCapabilities.put("keyword", new FieldCapabilities("rating", "keyword",
false, true,
new String[]{"index3", "index4"},
new String[]{"index4"},
null));
Map<String, Map<String, FieldCapabilities>> responses = new HashMap<>();
responses.put("title", titleCapabilities);
responses.put("rating", ratingCapabilities);
return new FieldCapabilitiesResponse(responses);
}
}

View File

@ -20,16 +20,26 @@
package org.elasticsearch.action.fieldcaps;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import java.io.IOException;
import java.util.Arrays;
import static org.hamcrest.Matchers.equalTo;
public class FieldCapabilitiesTests extends AbstractWireSerializingTestCase<FieldCapabilities> {
public class FieldCapabilitiesTests extends AbstractSerializingTestCase<FieldCapabilities> {
private static final String FIELD_NAME = "field";
@Override
protected FieldCapabilities doParseInstance(XContentParser parser) throws IOException {
return FieldCapabilities.fromXContent(FIELD_NAME, parser);
}
@Override
protected FieldCapabilities createTestInstance() {
return randomFieldCaps();
return randomFieldCaps(FIELD_NAME);
}
@Override
@ -82,7 +92,7 @@ public class FieldCapabilitiesTests extends AbstractWireSerializingTestCase<Fiel
}
}
static FieldCapabilities randomFieldCaps() {
static FieldCapabilities randomFieldCaps(String fieldName) {
String[] indices = null;
if (randomBoolean()) {
indices = new String[randomIntBetween(1, 5)];
@ -104,7 +114,7 @@ public class FieldCapabilitiesTests extends AbstractWireSerializingTestCase<Fiel
nonAggregatableIndices[i] = randomAlphaOfLengthBetween(5, 20);
}
}
return new FieldCapabilities(randomAlphaOfLengthBetween(5, 20),
return new FieldCapabilities(fieldName,
randomAlphaOfLengthBetween(5, 20), randomBoolean(), randomBoolean(),
indices, nonSearchableIndices, nonAggregatableIndices);
}

View File

@ -23,6 +23,7 @@ import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.shard.ShardId;
@ -62,10 +63,15 @@ public class AbstractSearchAsyncActionTests extends ESTestCase {
final SearchRequest request = new SearchRequest();
request.allowPartialSearchResults(true);
request.preference("_shards:1,3");
return new AbstractSearchAsyncAction<SearchPhaseResult>("test", null, null, null,
Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f), null,
request, null, new GroupShardsIterator<>(Collections.singletonList(
new SearchShardIterator(null, null, Collections.emptyList(), null))), timeProvider, 0, null,
Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f),
Collections.singletonMap("name", Sets.newHashSet("bar", "baz")),null, request, null,
new GroupShardsIterator<>(
Collections.singletonList(
new SearchShardIterator(null, null, Collections.emptyList(), null)
)
), timeProvider, 0, null,
new InitialSearchPhase.ArraySearchPhaseResults<>(10), request.getMaxConcurrentShardRequests(),
SearchResponse.Clusters.EMPTY) {
@Override
@ -117,5 +123,8 @@ public class AbstractSearchAsyncActionTests extends ESTestCase {
assertArrayEquals(new String[] {"name", "name1"}, shardSearchTransportRequest.indices());
assertEquals(new MatchAllQueryBuilder(), shardSearchTransportRequest.getAliasFilter().getQueryBuilder());
assertEquals(2.0f, shardSearchTransportRequest.indexBoost(), 0.0f);
assertArrayEquals(new String[] {"name", "name1"}, shardSearchTransportRequest.indices());
assertArrayEquals(new String[] {"bar", "baz"}, shardSearchTransportRequest.indexRoutings());
assertEquals("_shards:1,3", shardSearchTransportRequest.preference());
}
}

View File

@ -78,12 +78,12 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
2, randomBoolean(), primaryNode, replicaNode);
final SearchRequest searchRequest = new SearchRequest();
searchRequest.allowPartialSearchResults(true);
CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(logger,
searchTransportService,
(clusterAlias, node) -> lookup.get(node),
Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)),
Collections.emptyMap(), EsExecutors.newDirectExecutorService(),
Collections.emptyMap(), Collections.emptyMap(), EsExecutors.newDirectExecutorService(),
searchRequest, null, shardsIter, timeProvider, 0, null,
(iter) -> new SearchPhase("test") {
@Override
@ -159,12 +159,12 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
final SearchRequest searchRequest = new SearchRequest();
searchRequest.allowPartialSearchResults(true);
CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(logger,
searchTransportService,
(clusterAlias, node) -> lookup.get(node),
Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)),
Collections.emptyMap(), EsExecutors.newDirectExecutorService(),
Collections.emptyMap(), Collections.emptyMap(), EsExecutors.newDirectExecutorService(),
searchRequest, null, shardsIter, timeProvider, 0, null,
(iter) -> new SearchPhase("test") {
@Override
@ -222,6 +222,7 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
(clusterAlias, node) -> lookup.get(node),
Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)),
Collections.emptyMap(),
Collections.emptyMap(),
EsExecutors.newDirectExecutorService(),
searchRequest,
null,

View File

@ -106,6 +106,7 @@ public class SearchAsyncActionTests extends ESTestCase {
return lookup.get(node); },
aliasFilters,
Collections.emptyMap(),
Collections.emptyMap(),
null,
request,
responseListener,
@ -198,6 +199,7 @@ public class SearchAsyncActionTests extends ESTestCase {
return lookup.get(node); },
aliasFilters,
Collections.emptyMap(),
Collections.emptyMap(),
null,
request,
responseListener,
@ -303,6 +305,7 @@ public class SearchAsyncActionTests extends ESTestCase {
return lookup.get(node); },
aliasFilters,
Collections.emptyMap(),
Collections.emptyMap(),
executor,
request,
responseListener,

View File

@ -83,7 +83,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
}
/**
* puts primary shard routings into initializing state
*/
private void initPrimaries() {
logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1);

View File

@ -83,7 +83,7 @@ public class RoutingTableTests extends ESAllocationTestCase {
}
/**
* puts primary shard routings into initializing state
*/
private void initPrimaries() {
logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1);

View File

@ -21,7 +21,6 @@ package org.elasticsearch.index;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
@ -131,16 +130,16 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
assertTrue(indexService.getRefreshTask().mustReschedule());
// now disable
IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).build();
indexService.updateMetaData(metaData);
client().admin().indices().prepareUpdateSettings("test")
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).get();
assertNotSame(refreshTask, indexService.getRefreshTask());
assertTrue(refreshTask.isClosed());
assertFalse(refreshTask.isScheduled());
assertFalse(indexService.getRefreshTask().mustReschedule());
// set it to 100ms
metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "100ms")).build();
indexService.updateMetaData(metaData);
client().admin().indices().prepareUpdateSettings("test")
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "100ms")).get();
assertNotSame(refreshTask, indexService.getRefreshTask());
assertTrue(refreshTask.isClosed());
@ -150,8 +149,8 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
assertEquals(100, refreshTask.getInterval().millis());
// set it to 200ms
metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).build();
indexService.updateMetaData(metaData);
client().admin().indices().prepareUpdateSettings("test")
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).get();
assertNotSame(refreshTask, indexService.getRefreshTask());
assertTrue(refreshTask.isClosed());
@ -161,8 +160,8 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
assertEquals(200, refreshTask.getInterval().millis());
// set it to 200ms again
metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).build();
indexService.updateMetaData(metaData);
client().admin().indices().prepareUpdateSettings("test")
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).get();
assertSame(refreshTask, indexService.getRefreshTask());
assertTrue(indexService.getRefreshTask().mustReschedule());
assertTrue(refreshTask.isScheduled());
@ -174,7 +173,9 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
}
public void testFsyncTaskIsRunning() throws IOException {
IndexService indexService = createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC).build());
Settings settings = Settings.builder()
.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC).build();
IndexService indexService = createIndex("test", settings);
IndexService.AsyncTranslogFSync fsyncTask = indexService.getFsyncTask();
assertNotNull(fsyncTask);
assertEquals(5000, fsyncTask.getInterval().millis());
@ -198,12 +199,10 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
IndexShard shard = indexService.getShard(0);
client().prepareIndex("test", "test", "0").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
// now disable the refresh
IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData())
.settings(Settings.builder().put(indexService.getMetaData().getSettings())
.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).build();
client().admin().indices().prepareUpdateSettings("test")
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).get();
// when we update we reschedule the existing task AND fire off an async refresh to make sure we make everything
// visible; this is why we need to wait for the refresh task to be unscheduled and the first doc to be visible
indexService.updateMetaData(metaData);
assertTrue(refreshTask.isClosed());
refreshTask = indexService.getRefreshTask();
assertBusy(() -> {
@ -217,10 +216,8 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
assertFalse(refreshTask.isClosed());
// refresh every millisecond
client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
metaData = IndexMetaData.builder(indexService.getMetaData())
.settings(Settings.builder().put(indexService.getMetaData().getSettings())
.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")).build();
indexService.updateMetaData(metaData);
client().admin().indices().prepareUpdateSettings("test")
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")).get();
assertTrue(refreshTask.isClosed());
assertBusy(() -> {
// this one becomes visible due to the force refresh we are running on updateMetaData if the interval changes
@ -303,13 +300,11 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
assertTrue(indexService.getRefreshTask().mustReschedule());
client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
client().admin().indices().prepareFlush("test").get();
IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder()
.put(indexService.getMetaData().getSettings())
.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1)
.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1))
.build();
indexService.updateMetaData(metaData);
client().admin().indices().prepareUpdateSettings("test")
.setSettings(Settings.builder()
.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1)
.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1))
.get();
IndexShard shard = indexService.getShard(0);
assertBusy(() -> assertThat(shard.estimateTranslogOperationsFromMinSeq(0L), equalTo(0)));
}

View File

@ -122,6 +122,16 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
return null;
}
@Override
public String[] indexRoutings() {
return null;
}
@Override
public String preference() {
return null;
}
@Override
public void setProfile(boolean profile) {

View File

@ -59,7 +59,9 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;
import static java.util.Collections.emptyMap;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
@ -85,17 +87,13 @@ public class TermsSetQueryBuilderTests extends AbstractQueryTestCase<TermsSetQue
do {
fieldName = randomFrom(MAPPED_FIELD_NAMES);
} while (fieldName.equals(GEO_POINT_FIELD_NAME) || fieldName.equals(GEO_SHAPE_FIELD_NAME));
int numValues = randomIntBetween(0, 10);
List<Object> randomTerms = new ArrayList<>(numValues);
for (int i = 0; i < numValues; i++) {
randomTerms.add(getRandomValueForFieldName(fieldName));
}
List<?> randomTerms = randomValues(fieldName);
TermsSetQueryBuilder queryBuilder = new TermsSetQueryBuilder(STRING_FIELD_NAME, randomTerms);
if (randomBoolean()) {
queryBuilder.setMinimumShouldMatchField("m_s_m");
} else {
queryBuilder.setMinimumShouldMatchScript(
new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", Collections.emptyMap()));
new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", emptyMap()));
}
return queryBuilder;
}
@ -122,6 +120,41 @@ public class TermsSetQueryBuilderTests extends AbstractQueryTestCase<TermsSetQue
return false;
}
@Override
public TermsSetQueryBuilder mutateInstance(final TermsSetQueryBuilder instance) throws IOException {
String fieldName = instance.getFieldName();
List<?> values = instance.getValues();
String minimumShouldMatchField = null;
Script minimumShouldMatchScript = null;
switch (randomIntBetween(0, 3)) {
case 0:
Predicate<String> predicate = s -> s.equals(instance.getFieldName()) == false && s.equals(GEO_POINT_FIELD_NAME) == false
&& s.equals(GEO_SHAPE_FIELD_NAME) == false;
fieldName = randomValueOtherThanMany(predicate, () -> randomFrom(MAPPED_FIELD_NAMES));
values = randomValues(fieldName);
break;
case 1:
values = randomValues(fieldName);
break;
case 2:
minimumShouldMatchField = randomAlphaOfLengthBetween(1, 10);
break;
case 3:
minimumShouldMatchScript = new Script(ScriptType.INLINE, MockScriptEngine.NAME, randomAlphaOfLength(10), emptyMap());
break;
}
TermsSetQueryBuilder newInstance = new TermsSetQueryBuilder(fieldName, values);
if (minimumShouldMatchField != null) {
newInstance.setMinimumShouldMatchField(minimumShouldMatchField);
}
if (minimumShouldMatchScript != null) {
newInstance.setMinimumShouldMatchScript(minimumShouldMatchScript);
}
return newInstance;
}
public void testBothFieldAndScriptSpecified() {
TermsSetQueryBuilder queryBuilder = new TermsSetQueryBuilder("_field", Collections.emptyList());
queryBuilder.setMinimumShouldMatchScript(new Script(""));
@ -215,7 +248,7 @@ public class TermsSetQueryBuilderTests extends AbstractQueryTestCase<TermsSetQue
try (IndexReader ir = DirectoryReader.open(directory)) {
QueryShardContext context = createShardContext();
Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", Collections.emptyMap());
Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", emptyMap());
Query query = new TermsSetQueryBuilder("message", Arrays.asList("a", "b", "c", "d"))
.setMinimumShouldMatchScript(script).doToQuery(context);
IndexSearcher searcher = new IndexSearcher(ir);
@ -228,6 +261,16 @@ public class TermsSetQueryBuilderTests extends AbstractQueryTestCase<TermsSetQue
}
}
private static List<?> randomValues(final String fieldName) {
final int numValues = randomIntBetween(0, 10);
final List<Object> values = new ArrayList<>(numValues);
for (int i = 0; i < numValues; i++) {
values.add(getRandomValueForFieldName(fieldName));
}
return values;
}
public static class CustomScriptPlugin extends MockScriptPlugin {
@Override

View File

@ -170,7 +170,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
assertThat(client().prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
}
logger.info("--> search with 0,1 routings , should find two");
logger.info("--> search with 0,1 indexRoutings , should find two");
for (int i = 0; i < 5; i++) {
assertThat(client().prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
assertThat(client().prepareSearch().setSize(0).setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));

View File

@ -173,13 +173,13 @@ public class SimpleRoutingIT extends ESIntegTestCase {
assertThat(client().prepareSearch().setSize(0).setRouting(secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
}
logger.info("--> search with {},{} routings , should find two", routingValue, "1");
logger.info("--> search with {},{} indexRoutings , should find two", routingValue, "1");
for (int i = 0; i < 5; i++) {
assertThat(client().prepareSearch().setRouting(routingValue, secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
assertThat(client().prepareSearch().setSize(0).setRouting(routingValue, secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
}
logger.info("--> search with {},{},{} routings , should find two", routingValue, secondRoutingValue, routingValue);
logger.info("--> search with {},{},{} indexRoutings , should find two", routingValue, secondRoutingValue, routingValue);
for (int i = 0; i < 5; i++) {
assertThat(client().prepareSearch().setRouting(routingValue, secondRoutingValue, routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
assertThat(client().prepareSearch().setSize(0).setRouting(routingValue, secondRoutingValue, routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));

View File

@ -112,8 +112,8 @@ public class DefaultSearchContextTests extends ESTestCase {
IndexReader reader = w.getReader();
Engine.Searcher searcher = new Engine.Searcher("test", new IndexSearcher(reader))) {
DefaultSearchContext context1 = new DefaultSearchContext(1L, shardSearchRequest, null, searcher, indexService,
indexShard, bigArrays, null, timeout, null, null);
DefaultSearchContext context1 = new DefaultSearchContext(1L, shardSearchRequest, null, searcher, null, indexService,
indexShard, bigArrays, null, timeout, null, null, Version.CURRENT);
context1.from(300);
// resultWindow greater than maxResultWindow and scrollContext is null
@ -153,8 +153,8 @@ public class DefaultSearchContextTests extends ESTestCase {
+ "] index level setting."));
// rescore is null but sliceBuilder is not null
DefaultSearchContext context2 = new DefaultSearchContext(2L, shardSearchRequest, null, searcher, indexService,
indexShard, bigArrays, null, timeout, null, null);
DefaultSearchContext context2 = new DefaultSearchContext(2L, shardSearchRequest, null, searcher,
null, indexService, indexShard, bigArrays, null, timeout, null, null, Version.CURRENT);
SliceBuilder sliceBuilder = mock(SliceBuilder.class);
int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100);
@ -170,8 +170,8 @@ public class DefaultSearchContextTests extends ESTestCase {
when(shardSearchRequest.getAliasFilter()).thenReturn(AliasFilter.EMPTY);
when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST);
DefaultSearchContext context3 = new DefaultSearchContext(3L, shardSearchRequest, null, searcher, indexService,
indexShard, bigArrays, null, timeout, null, null);
DefaultSearchContext context3 = new DefaultSearchContext(3L, shardSearchRequest, null, searcher, null,
indexService, indexShard, bigArrays, null, timeout, null, null, Version.CURRENT);
ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery();
context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false);
assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query()));

View File

@ -213,7 +213,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
SearchPhaseResult searchPhaseResult = service.executeQueryPhase(
new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f,
true),
true, null, null),
new SearchTask(123L, "", "", "", null, Collections.emptyMap()));
IntArrayList intCursors = new IntArrayList(1);
intCursors.add(0);
@ -249,7 +249,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
new String[0],
false,
new AliasFilter(null, Strings.EMPTY_ARRAY),
1.0f, true)
1.0f, true, null, null)
);
try {
// the search context should inherit the default timeout
@ -269,7 +269,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
new String[0],
false,
new AliasFilter(null, Strings.EMPTY_ARRAY),
1.0f, true)
1.0f, true, null, null)
);
try {
// the search context should inherit the query timeout
@ -297,12 +297,13 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
searchSourceBuilder.docValueField("field" + i);
}
try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))) {
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true, null, null))) {
assertNotNull(context);
searchSourceBuilder.docValueField("one_field_too_much");
IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
() -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true)));
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f,
true, null, null)));
assertEquals(
"Trying to retrieve too many docvalue_fields. Must be less than or equal to: [100] but was [101]. "
+ "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.",
@ -328,13 +329,14 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap()));
}
try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))) {
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true, null, null))) {
assertNotNull(context);
searchSourceBuilder.scriptField("anotherScriptField",
new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap()));
IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
() -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true)));
searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY),
1.0f, true, null, null)));
assertEquals(
"Trying to retrieve too many script_fields. Must be less than or equal to: [" + maxScriptFields + "] but was ["
+ (maxScriptFields + 1)
@ -406,28 +408,28 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
final IndexShard indexShard = indexService.getShard(0);
final boolean allowPartialSearchResults = true;
assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, null,
Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults)));
Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null)));
assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH,
new SearchSourceBuilder(), Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f,
allowPartialSearchResults)));
new SearchSourceBuilder(), Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f,
allowPartialSearchResults, null, null)));
assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH,
new SearchSourceBuilder().query(new MatchAllQueryBuilder()), Strings.EMPTY_ARRAY, false,
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults)));
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null)));
assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH,
new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
.aggregation(new TermsAggregationBuilder("test", ValueType.STRING).minDocCount(0)), Strings.EMPTY_ARRAY, false,
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults)));
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null)));
assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH,
new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
.aggregation(new GlobalAggregationBuilder("test")), Strings.EMPTY_ARRAY, false,
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults)));
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null)));
assertFalse(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH,
new SearchSourceBuilder().query(new MatchNoneQueryBuilder()), Strings.EMPTY_ARRAY, false,
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults)));
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null)));
}
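Each call site above gains two trailing arguments; they appear to carry the request's routing values and search preference down to the shard level:

[source,java]
----
// Assumed extended signature (a sketch; the trailing `null, null` pairs above
// would correspond to indexRoutings and preference):
//   ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType,
//           SearchSourceBuilder source, String[] types, Boolean requestCache,
//           AliasFilter aliasFilter, float indexBoost, boolean allowPartialSearchResults,
//           String[] indexRoutings, String preference)
----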

View File

@ -74,6 +74,8 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
assertEquals(deserializedRequest.searchType(), shardSearchTransportRequest.searchType());
assertEquals(deserializedRequest.shardId(), shardSearchTransportRequest.shardId());
assertEquals(deserializedRequest.numberOfShards(), shardSearchTransportRequest.numberOfShards());
assertEquals(deserializedRequest.indexRoutings(), shardSearchTransportRequest.indexRoutings());
assertEquals(deserializedRequest.preference(), shardSearchTransportRequest.preference());
assertEquals(deserializedRequest.cacheKey(), shardSearchTransportRequest.cacheKey());
assertNotSame(deserializedRequest, shardSearchTransportRequest);
assertEquals(deserializedRequest.getAliasFilter(), shardSearchTransportRequest.getAliasFilter());
@ -92,8 +94,10 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
} else {
filteringAliases = new AliasFilter(null, Strings.EMPTY_ARRAY);
}
final String[] routings = generateRandomStringArray(5, 10, false, true);
return new ShardSearchTransportRequest(new OriginalIndices(searchRequest), searchRequest, shardId,
randomIntBetween(1, 100), filteringAliases, randomBoolean() ? 1.0f : randomFloat(), Math.abs(randomLong()), null);
randomIntBetween(1, 100), filteringAliases, randomBoolean() ? 1.0f : randomFloat(),
Math.abs(randomLong()), null, routings);
}
public void testFilteringAliases() throws Exception {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.search.slice;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchRequestBuilder;
@ -48,9 +49,7 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.startsWith;
public class SearchSliceIT extends ESIntegTestCase {
private static final int NUM_DOCS = 1000;
private int setupIndex(boolean withDocs) throws IOException, ExecutionException, InterruptedException {
private void setupIndex(int numDocs, int numberOfShards) throws IOException, ExecutionException, InterruptedException {
String mapping = Strings.toString(XContentFactory.jsonBuilder().
startObject()
.startObject("type")
@ -70,74 +69,112 @@ public class SearchSliceIT extends ESIntegTestCase {
.endObject()
.endObject()
.endObject());
int numberOfShards = randomIntBetween(1, 7);
assertAcked(client().admin().indices().prepareCreate("test")
.setSettings(Settings.builder().put("number_of_shards", numberOfShards).put("index.max_slices_per_scroll", 10000))
.addMapping("type", mapping, XContentType.JSON));
ensureGreen();
if (withDocs == false) {
return numberOfShards;
}
List<IndexRequestBuilder> requests = new ArrayList<>();
for (int i = 0; i < NUM_DOCS; i++) {
XContentBuilder builder = jsonBuilder();
builder.startObject();
builder.field("invalid_random_kw", randomAlphaOfLengthBetween(5, 20));
builder.field("random_int", randomInt());
builder.field("static_int", 0);
builder.field("invalid_random_int", randomInt());
builder.endObject();
for (int i = 0; i < numDocs; i++) {
XContentBuilder builder = jsonBuilder()
.startObject()
.field("invalid_random_kw", randomAlphaOfLengthBetween(5, 20))
.field("random_int", randomInt())
.field("static_int", 0)
.field("invalid_random_int", randomInt())
.endObject();
requests.add(client().prepareIndex("test", "type").setSource(builder));
}
indexRandom(true, requests);
return numberOfShards;
}
public void testDocIdSort() throws Exception {
int numShards = setupIndex(true);
SearchResponse sr = client().prepareSearch("test")
.setQuery(matchAllQuery())
.setSize(0)
.get();
int numDocs = (int) sr.getHits().getTotalHits();
assertThat(numDocs, equalTo(NUM_DOCS));
int max = randomIntBetween(2, numShards*3);
public void testSearchSort() throws Exception {
int numShards = randomIntBetween(1, 7);
int numDocs = randomIntBetween(100, 1000);
setupIndex(numDocs, numShards);
int max = randomIntBetween(2, numShards * 3);
for (String field : new String[]{"_id", "random_int", "static_int"}) {
int fetchSize = randomIntBetween(10, 100);
// test _doc sort
SearchRequestBuilder request = client().prepareSearch("test")
.setQuery(matchAllQuery())
.setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
.setSize(fetchSize)
.addSort(SortBuilders.fieldSort("_doc"));
assertSearchSlicesWithScroll(request, field, max);
}
}
assertSearchSlicesWithScroll(request, field, max, numDocs);
public void testNumericSort() throws Exception {
int numShards = setupIndex(true);
SearchResponse sr = client().prepareSearch("test")
.setQuery(matchAllQuery())
.setSize(0)
.get();
int numDocs = (int) sr.getHits().getTotalHits();
assertThat(numDocs, equalTo(NUM_DOCS));
int max = randomIntBetween(2, numShards*3);
for (String field : new String[]{"_id", "random_int", "static_int"}) {
int fetchSize = randomIntBetween(10, 100);
SearchRequestBuilder request = client().prepareSearch("test")
// test numeric sort
request = client().prepareSearch("test")
.setQuery(matchAllQuery())
.setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
.addSort(SortBuilders.fieldSort("random_int"))
.setSize(fetchSize);
assertSearchSlicesWithScroll(request, field, max);
assertSearchSlicesWithScroll(request, field, max, numDocs);
}
}
public void testWithPreferenceAndRoutings() throws Exception {
int numShards = 10;
int totalDocs = randomIntBetween(100, 1000);
setupIndex(totalDocs, numShards);
{
SearchResponse sr = client().prepareSearch("test")
.setQuery(matchAllQuery())
.setPreference("_shards:1,4")
.setSize(0)
.get();
int numDocs = (int) sr.getHits().getTotalHits();
int max = randomIntBetween(2, numShards * 3);
int fetchSize = randomIntBetween(10, 100);
SearchRequestBuilder request = client().prepareSearch("test")
.setQuery(matchAllQuery())
.setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
.setSize(fetchSize)
.setPreference("_shards:1,4")
.addSort(SortBuilders.fieldSort("_doc"));
assertSearchSlicesWithScroll(request, "_id", max, numDocs);
}
{
SearchResponse sr = client().prepareSearch("test")
.setQuery(matchAllQuery())
.setRouting("foo", "bar")
.setSize(0)
.get();
int numDocs = (int) sr.getHits().getTotalHits();
int max = randomIntBetween(2, numShards * 3);
int fetchSize = randomIntBetween(10, 100);
SearchRequestBuilder request = client().prepareSearch("test")
.setQuery(matchAllQuery())
.setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
.setSize(fetchSize)
.setRouting("foo", "bar")
.addSort(SortBuilders.fieldSort("_doc"));
assertSearchSlicesWithScroll(request, "_id", max, numDocs);
}
{
assertAcked(client().admin().indices().prepareAliases()
.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias1").routing("foo"))
.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias2").routing("bar"))
.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias3").routing("baz"))
.get());
SearchResponse sr = client().prepareSearch("alias1", "alias3")
.setQuery(matchAllQuery())
.setSize(0)
.get();
int numDocs = (int) sr.getHits().getTotalHits();
int max = randomIntBetween(2, numShards * 3);
int fetchSize = randomIntBetween(10, 100);
SearchRequestBuilder request = client().prepareSearch("alias1", "alias3")
.setQuery(matchAllQuery())
.setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
.setSize(fetchSize)
.addSort(SortBuilders.fieldSort("_doc"));
assertSearchSlicesWithScroll(request, "_id", max, numDocs);
}
}
public void testInvalidFields() throws Exception {
setupIndex(false);
setupIndex(0, 1);
SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class,
() -> client().prepareSearch("test")
.setQuery(matchAllQuery())
@ -161,7 +198,7 @@ public class SearchSliceIT extends ESIntegTestCase {
}
public void testInvalidQuery() throws Exception {
setupIndex(false);
setupIndex(0, 1);
SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class,
() -> client().prepareSearch()
.setQuery(matchAllQuery())
@ -173,7 +210,7 @@ public class SearchSliceIT extends ESIntegTestCase {
equalTo("`slice` cannot be used outside of a scroll context"));
}
private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice) {
private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice, int numDocs) {
int totalResults = 0;
List<String> keys = new ArrayList<>();
for (int id = 0; id < numSlice; id++) {
@ -184,7 +221,7 @@ public class SearchSliceIT extends ESIntegTestCase {
int numSliceResults = searchResponse.getHits().getHits().length;
String scrollId = searchResponse.getScrollId();
for (SearchHit hit : searchResponse.getHits().getHits()) {
keys.add(hit.getId());
assertTrue(keys.add(hit.getId()));
}
while (searchResponse.getHits().getHits().length > 0) {
searchResponse = client().prepareSearchScroll("test")
@ -195,15 +232,15 @@ public class SearchSliceIT extends ESIntegTestCase {
totalResults += searchResponse.getHits().getHits().length;
numSliceResults += searchResponse.getHits().getHits().length;
for (SearchHit hit : searchResponse.getHits().getHits()) {
keys.add(hit.getId());
assertTrue(keys.add(hit.getId()));
}
}
assertThat(numSliceResults, equalTo(expectedSliceResults));
clearScroll(scrollId);
}
assertThat(totalResults, equalTo(NUM_DOCS));
assertThat(keys.size(), equalTo(NUM_DOCS));
assertThat(new HashSet(keys).size(), equalTo(NUM_DOCS));
assertThat(totalResults, equalTo(numDocs));
assertThat(keys.size(), equalTo(numDocs));
assertThat(new HashSet<>(keys).size(), equalTo(numDocs));
}
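For reference, a single slice of the scroll exercised by this helper looks roughly like the following (a sketch in the style of this suite; it assumes the `slice(...)` setter on `SearchRequestBuilder` that the elided part of the helper uses):

[source,java]
----
SearchResponse rsp = client().prepareSearch("test")
        .setQuery(matchAllQuery())
        .setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
        .setSize(fetchSize)
        .slice(new SliceBuilder("_id", 0, 2)) // slice 0 of 2 over the _id field
        .addSort(SortBuilders.fieldSort("_doc"))
        .get();
----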
private Throwable findRootCause(Exception e) {

View File

@ -30,19 +30,38 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.elasticsearch.Version;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.search.SearchShardIterator;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
@ -58,13 +77,138 @@ import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashC
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class SliceBuilderTests extends ESTestCase {
private static final int MAX_SLICE = 20;
private static SliceBuilder randomSliceBuilder() throws IOException {
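// A minimal ShardSearchRequest stub for these tests: only the index name,
// shard id, indexRoutings and preference carry real values; every other
// accessor returns a default.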
static class ShardSearchRequestTest implements IndicesRequest, ShardSearchRequest {
private final String[] indices;
private final int shardId;
private final String[] indexRoutings;
private final String preference;
ShardSearchRequestTest(String index, int shardId, String[] indexRoutings, String preference) {
this.indices = new String[] { index };
this.shardId = shardId;
this.indexRoutings = indexRoutings;
this.preference = preference;
}
@Override
public String[] indices() {
return indices;
}
@Override
public IndicesOptions indicesOptions() {
return null;
}
@Override
public ShardId shardId() {
return new ShardId(new Index(indices[0], indices[0]), shardId);
}
@Override
public String[] types() {
return new String[0];
}
@Override
public SearchSourceBuilder source() {
return null;
}
@Override
public AliasFilter getAliasFilter() {
return null;
}
@Override
public void setAliasFilter(AliasFilter filter) {
}
@Override
public void source(SearchSourceBuilder source) {
}
@Override
public int numberOfShards() {
return 0;
}
@Override
public SearchType searchType() {
return null;
}
@Override
public float indexBoost() {
return 0;
}
@Override
public long nowInMillis() {
return 0;
}
@Override
public Boolean requestCache() {
return null;
}
@Override
public Boolean allowPartialSearchResults() {
return null;
}
@Override
public Scroll scroll() {
return null;
}
@Override
public String[] indexRoutings() {
return indexRoutings;
}
@Override
public String preference() {
return preference;
}
@Override
public void setProfile(boolean profile) {
}
@Override
public boolean isProfile() {
return false;
}
@Override
public BytesReference cacheKey() throws IOException {
return null;
}
@Override
public String getClusterAlias() {
return null;
}
@Override
public Rewriteable<Rewriteable> getRewriteable() {
return null;
}
}
private static SliceBuilder randomSliceBuilder() {
int max = randomIntBetween(2, MAX_SLICE);
int id = randomIntBetween(1, max - 1);
String field = randomAlphaOfLengthBetween(5, 20);
@ -75,7 +219,7 @@ public class SliceBuilderTests extends ESTestCase {
return copyWriteable(original, new NamedWriteableRegistry(Collections.emptyList()), SliceBuilder::new);
}
private static SliceBuilder mutate(SliceBuilder original) throws IOException {
private static SliceBuilder mutate(SliceBuilder original) {
switch (randomIntBetween(0, 2)) {
case 0: return new SliceBuilder(original.getField() + "_xyz", original.getId(), original.getMax());
case 1: return new SliceBuilder(original.getField(), original.getId() - 1, original.getMax());
@ -84,6 +228,63 @@ public class SliceBuilderTests extends ESTestCase {
}
}
private IndexSettings createIndexSettings(Version indexVersionCreated, int numShards) {
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build();
IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build();
return new IndexSettings(indexState, Settings.EMPTY);
}
private ShardSearchRequest createRequest(int shardId) {
return createRequest(shardId, Strings.EMPTY_ARRAY, null);
}
private ShardSearchRequest createRequest(int shardId, String[] routings, String preference) {
return new ShardSearchRequestTest("index", shardId, routings, preference);
}
private QueryShardContext createShardContext(Version indexVersionCreated, IndexReader reader,
String fieldName, DocValuesType dvType, int numShards, int shardId) {
MappedFieldType fieldType = new MappedFieldType() {
@Override
public MappedFieldType clone() {
return null;
}
@Override
public String typeName() {
return null;
}
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
return null;
}
public Query existsQuery(QueryShardContext context) {
return null;
}
};
fieldType.setName(fieldName);
QueryShardContext context = mock(QueryShardContext.class);
when(context.fieldMapper(fieldName)).thenReturn(fieldType);
when(context.getIndexReader()).thenReturn(reader);
when(context.getShardId()).thenReturn(shardId);
IndexSettings indexSettings = createIndexSettings(indexVersionCreated, numShards);
when(context.getIndexSettings()).thenReturn(indexSettings);
if (dvType != null) {
fieldType.setHasDocValues(true);
fieldType.setDocValuesType(dvType);
IndexNumericFieldData fd = mock(IndexNumericFieldData.class);
when(context.getForField(fieldType)).thenReturn(fd);
}
return context;
}
public void testSerialization() throws Exception {
SliceBuilder original = randomSliceBuilder();
SliceBuilder deserialized = serializedCopy(original);
@ -131,92 +332,41 @@ public class SliceBuilderTests extends ESTestCase {
assertEquals("max must be greater than id", e.getMessage());
}
public void testToFilter() throws IOException {
public void testToFilterSimple() throws IOException {
Directory dir = new RAMDirectory();
try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
writer.commit();
}
QueryShardContext context = mock(QueryShardContext.class);
try (IndexReader reader = DirectoryReader.open(dir)) {
MappedFieldType fieldType = new MappedFieldType() {
@Override
public MappedFieldType clone() {
return null;
}
@Override
public String typeName() {
return null;
}
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
return null;
}
public Query existsQuery(QueryShardContext context) {
return null;
}
};
fieldType.setName(IdFieldMapper.NAME);
fieldType.setHasDocValues(false);
when(context.fieldMapper(IdFieldMapper.NAME)).thenReturn(fieldType);
when(context.getIndexReader()).thenReturn(reader);
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build();
IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build();
IndexSettings indexSettings = new IndexSettings(indexState, Settings.EMPTY);
when(context.getIndexSettings()).thenReturn(indexSettings);
QueryShardContext context =
createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED_NUMERIC, 1, 0);
SliceBuilder builder = new SliceBuilder(5, 10);
Query query = builder.toFilter(context, 0, 1);
Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT);
assertThat(query, instanceOf(TermsSliceQuery.class));
assertThat(builder.toFilter(context, 0, 1), equalTo(query));
assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query));
try (IndexReader newReader = DirectoryReader.open(dir)) {
when(context.getIndexReader()).thenReturn(newReader);
assertThat(builder.toFilter(context, 0, 1), equalTo(query));
assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query));
}
}
}
public void testToFilterRandom() throws IOException {
Directory dir = new RAMDirectory();
try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
writer.commit();
}
try (IndexReader reader = DirectoryReader.open(dir)) {
MappedFieldType fieldType = new MappedFieldType() {
@Override
public MappedFieldType clone() {
return null;
}
@Override
public String typeName() {
return null;
}
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
return null;
}
public Query existsQuery(QueryShardContext context) {
return null;
}
};
fieldType.setName("field_doc_values");
fieldType.setHasDocValues(true);
fieldType.setDocValuesType(DocValuesType.SORTED_NUMERIC);
when(context.fieldMapper("field_doc_values")).thenReturn(fieldType);
when(context.getIndexReader()).thenReturn(reader);
IndexNumericFieldData fd = mock(IndexNumericFieldData.class);
when(context.getForField(fieldType)).thenReturn(fd);
SliceBuilder builder = new SliceBuilder("field_doc_values", 5, 10);
Query query = builder.toFilter(context, 0, 1);
QueryShardContext context =
createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED_NUMERIC, 1, 0);
SliceBuilder builder = new SliceBuilder("field", 5, 10);
Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT);
assertThat(query, instanceOf(DocValuesSliceQuery.class));
assertThat(builder.toFilter(context, 0, 1), equalTo(query));
assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query));
try (IndexReader newReader = DirectoryReader.open(dir)) {
when(context.getIndexReader()).thenReturn(newReader);
assertThat(builder.toFilter(context, 0, 1), equalTo(query));
assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query));
}
// numSlices > numShards
@ -226,7 +376,8 @@ public class SliceBuilderTests extends ESTestCase {
for (int i = 0; i < numSlices; i++) {
for (int j = 0; j < numShards; j++) {
SliceBuilder slice = new SliceBuilder("_id", i, numSlices);
Query q = slice.toFilter(context, j, numShards);
context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j);
Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT);
if (q instanceof TermsSliceQuery || q instanceof MatchAllDocsQuery) {
AtomicInteger count = numSliceMap.get(j);
if (count == null) {
@ -250,12 +401,13 @@ public class SliceBuilderTests extends ESTestCase {
// numShards > numSlices
numShards = randomIntBetween(4, 100);
numSlices = randomIntBetween(2, numShards-1);
numSlices = randomIntBetween(2, numShards - 1);
List<Integer> targetShards = new ArrayList<>();
for (int i = 0; i < numSlices; i++) {
for (int j = 0; j < numShards; j++) {
SliceBuilder slice = new SliceBuilder("_id", i, numSlices);
Query q = slice.toFilter(context, j, numShards);
context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j);
Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT);
if (q instanceof MatchNoDocsQuery == false) {
assertThat(q, instanceOf(MatchAllDocsQuery.class));
targetShards.add(j);
@ -271,7 +423,8 @@ public class SliceBuilderTests extends ESTestCase {
for (int i = 0; i < numSlices; i++) {
for (int j = 0; j < numShards; j++) {
SliceBuilder slice = new SliceBuilder("_id", i, numSlices);
Query q = slice.toFilter(context, j, numShards);
context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j);
Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT);
if (i == j) {
assertThat(q, instanceOf(MatchAllDocsQuery.class));
} else {
@ -280,85 +433,35 @@ public class SliceBuilderTests extends ESTestCase {
}
}
}
}
public void testInvalidField() throws IOException {
Directory dir = new RAMDirectory();
try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
writer.commit();
}
try (IndexReader reader = DirectoryReader.open(dir)) {
MappedFieldType fieldType = new MappedFieldType() {
@Override
public MappedFieldType clone() {
return null;
}
@Override
public String typeName() {
return null;
}
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
return null;
}
public Query existsQuery(QueryShardContext context) {
return null;
}
};
fieldType.setName("field_without_doc_values");
when(context.fieldMapper("field_without_doc_values")).thenReturn(fieldType);
when(context.getIndexReader()).thenReturn(reader);
SliceBuilder builder = new SliceBuilder("field_without_doc_values", 5, 10);
IllegalArgumentException exc =
expectThrows(IllegalArgumentException.class, () -> builder.toFilter(context, 0, 1));
QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", null, 1, 0);
SliceBuilder builder = new SliceBuilder("field", 5, 10);
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
() -> builder.toFilter(null, createRequest(0), context, Version.CURRENT));
assertThat(exc.getMessage(), containsString("cannot load numeric doc values"));
}
}
public void testToFilterDeprecationMessage() throws IOException {
Directory dir = new RAMDirectory();
try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
writer.commit();
}
QueryShardContext context = mock(QueryShardContext.class);
try (IndexReader reader = DirectoryReader.open(dir)) {
MappedFieldType fieldType = new MappedFieldType() {
@Override
public MappedFieldType clone() {
return null;
}
@Override
public String typeName() {
return null;
}
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
return null;
}
public Query existsQuery(QueryShardContext context) {
return null;
}
};
fieldType.setName("_uid");
fieldType.setHasDocValues(false);
when(context.fieldMapper("_uid")).thenReturn(fieldType);
when(context.getIndexReader()).thenReturn(reader);
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build();
IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build();
IndexSettings indexSettings = new IndexSettings(indexState, Settings.EMPTY);
when(context.getIndexSettings()).thenReturn(indexSettings);
QueryShardContext context = createShardContext(Version.V_6_3_0, reader, "_uid", null, 1, 0);
SliceBuilder builder = new SliceBuilder("_uid", 5, 10);
Query query = builder.toFilter(context, 0, 1);
Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT);
assertThat(query, instanceOf(TermsSliceQuery.class));
assertThat(builder.toFilter(context, 0, 1), equalTo(query));
assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query));
assertWarnings("Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead");
}
}
public void testSerializationBackcompat() throws IOException {
@ -375,4 +478,35 @@ public class SliceBuilderTests extends ESTestCase {
SliceBuilder::new, Version.V_6_3_0);
assertEquals(sliceBuilder, copy63);
}
public void testToFilterWithRouting() throws IOException {
Directory dir = new RAMDirectory();
try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
writer.commit();
}
ClusterService clusterService = mock(ClusterService.class);
ClusterState state = mock(ClusterState.class);
when(state.metaData()).thenReturn(MetaData.EMPTY_META_DATA);
when(clusterService.state()).thenReturn(state);
OperationRouting routing = mock(OperationRouting.class);
GroupShardsIterator<ShardIterator> it = new GroupShardsIterator<>(
Collections.singletonList(
new SearchShardIterator(null, new ShardId("index", "index", 1), null, null)
)
);
when(routing.searchShards(any(), any(), any(), any())).thenReturn(it);
when(clusterService.operationRouting()).thenReturn(routing);
when(clusterService.getSettings()).thenReturn(Settings.EMPTY);
try (IndexReader reader = DirectoryReader.open(dir)) {
QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED, 5, 0);
SliceBuilder builder = new SliceBuilder("field", 6, 10);
String[] routings = new String[] { "foo" };
Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, Version.CURRENT);
assertEquals(new DocValuesSliceQuery("field", 6, 10), query);
query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.CURRENT);
assertEquals(new DocValuesSliceQuery("field", 6, 10), query);
query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.V_6_2_0);
assertEquals(new DocValuesSliceQuery("field", 1, 2), query);
}
}
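The `V_6_2_0` expectation follows from the legacy per-shard remapping (a sketch of the assumed pre-6.3 arithmetic):

[source,java]
----
// With numSlices = 10 greater than numShards = 5:
//   target shard = id % numShards        = 6 % 5  = 1  (matches the request's shard 1)
//   local id     = id / numShards        = 6 / 5  = 1
//   local max    = numSlices / numShards = 10 / 5 = 2
// hence the expected DocValuesSliceQuery("field", 1, 2).
----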
}

View File

@ -61,7 +61,6 @@ buildRestTests.expectedUnconvertedCandidates = [
'en/watcher/trigger/schedule/yearly.asciidoc',
'en/watcher/troubleshooting.asciidoc',
'en/rest-api/license/delete-license.asciidoc',
'en/rest-api/license/start-trial.asciidoc',
'en/rest-api/license/update-license.asciidoc',
'en/ml/api-quickref.asciidoc',
'en/rest-api/ml/delete-calendar-event.asciidoc',

View File

@ -40,7 +40,7 @@ The following example checks whether you are eligible to start a trial:
[source,js]
------------------------------------------------------------
POST _xpack/license/start_trial
GET _xpack/license/start_trial
------------------------------------------------------------
// CONSOLE
// TEST[skip:license testing issues]
@ -49,6 +49,27 @@ Example response:
[source,js]
------------------------------------------------------------
{
"trial_was_started": true
"eligible_to_start_trial": true
}
------------------------------------------------------------
// NOTCONSOLE
The following example starts a 30-day trial license. The `acknowledge`
parameter is required because you are initiating a license that will expire.
[source,js]
------------------------------------------------------------
POST _xpack/license/start_trial?acknowledge=true
------------------------------------------------------------
// CONSOLE
// TEST[skip:license testing issues]
Example response:
[source,js]
------------------------------------------------------------
{
"trial_was_started": true,
"acknowledged": true
}
------------------------------------------------------------
// NOTCONSOLE
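When `acknowledge=true` is omitted, the trial is not started and the response carries the acknowledgement texts instead. A sketch of its shape, assembled from the REST handler changes later in this commit (messages abbreviated):

[source,js]
------------------------------------------------------------
{
  "acknowledged": false,
  "trial_was_started": false,
  "error_message": "Operation failed: Needs acknowledgement.",
  "acknowledge": {
    "message": "This API initiates a free 30-day trial for all platinum features. ...",
    "security": [
      "With a trial license, X-Pack security features are available, but are not enabled by default."
    ]
  }
}
------------------------------------------------------------
// NOTCONSOLE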

View File

@ -3,8 +3,7 @@
=== Install {es} with Docker
{es} is also available as Docker images.
The images use https://hub.docker.com/_/centos/[centos:7] as the base image and
are available with {xpack-ref}/xpack-introduction.html[X-Pack].
The images use https://hub.docker.com/_/centos/[centos:7] as the base image.
A list of all published Docker images and tags can be found in
https://www.docker.elastic.co[www.docker.elastic.co]. The source code can be found
@ -12,28 +11,19 @@ on https://github.com/elastic/elasticsearch-docker/tree/{branch}[GitHub].
==== Image types
The images are available in three different configurations or "flavors". The
`basic` flavor, which is the default, ships with {xpack} Basic features
pre-installed and automatically activated with a free licence. The `platinum`
flavor features all {xpack} functionally under a 30-day trial licence. The `oss`
flavor does not include {xpack}, and contains only open-source {es}.
These images are free to use under the Elastic license. They contain open
source and free commercial features, as well as access to paid commercial
features.
{xpack-ref}/license-management.html[Start a 30-day trial] to try out all of the
paid commercial features. See the
https://www.elastic.co/subscriptions[Subscriptions] page for information about
Elastic license levels.
NOTE: {xpack-ref}/xpack-security.html[X-Pack Security] is enabled in the `platinum`
image. To access your cluster, it's necessary to set an initial password for the
`elastic` user. The initial password can be set at start up time via the
`ELASTIC_PASSWORD` environment variable:
Alternatively, you can download `-oss` images, which contain only features that
are available under the Apache 2.0 license.
["source","txt",subs="attributes"]
--------------------------------------------
docker run -e ELASTIC_PASSWORD=MagicWord {docker-repo}-platinum:{version}
--------------------------------------------
==== Pulling the image
NOTE: The `platinum` image includes a trial license for 30 days. After that, you
can obtain one of the https://www.elastic.co/subscriptions[available
subscriptions] or revert to a Basic licence. The Basic license is free and
includes a selection of {xpack} features.
Obtaining {Es} for Docker is as simple as issuing a +docker pull+ command
Obtaining {es} for Docker is as simple as issuing a +docker pull+ command
against the Elastic Docker registry.
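For example, pulling the default image might look like this (a sketch reusing the `{docker-repo}` and `{version}` attributes seen above):

["source","txt",subs="attributes"]
--------------------------------------------
docker pull {docker-repo}:{version}
--------------------------------------------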
ifeval::["{release-state}"=="unreleased"]

View File

@ -41,11 +41,11 @@ public class LicensingClient {
client.execute(DeleteLicenseAction.INSTANCE, request, listener);
}
public PostStartTrialRequestBuilder preparePostUpgradeToTrial() {
public PostStartTrialRequestBuilder preparePostStartTrial() {
return new PostStartTrialRequestBuilder(client, PostStartTrialAction.INSTANCE);
}
public GetTrialStatusRequestBuilder prepareGetUpgradeToTrial() {
public GetTrialStatusRequestBuilder prepareGetStartTrial() {
return new GetTrialStatusRequestBuilder(client, GetTrialStatusAction.INSTANCE);
}

View File

@ -15,6 +15,7 @@ import java.io.IOException;
public class PostStartTrialRequest extends MasterNodeRequest<PostStartTrialRequest> {
private boolean acknowledge = false;
private String type;
@Override
@ -31,25 +32,47 @@ public class PostStartTrialRequest extends MasterNodeRequest<PostStartTrialReque
return type;
}
public PostStartTrialRequest acknowledge(boolean acknowledge) {
this.acknowledge = acknowledge;
return this;
}
public boolean isAcknowledged() {
return acknowledge;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
type = in.readString();
acknowledge = in.readBoolean();
} else {
type = "trial";
acknowledge = true;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
Version version = Version.V_6_3_0;
// TODO: Change to 6.3 after backport
Version version = Version.V_7_0_0_alpha1;
if (out.getVersion().onOrAfter(version)) {
super.writeTo(out);
out.writeString(type);
out.writeBoolean(acknowledge);
} else {
throw new IllegalArgumentException("All nodes in cluster must be version [" + version
+ "] or newer to use `type` parameter. Attempting to write to node with version [" + out.getVersion() + "].");
if ("trial".equals(type) == false) {
throw new IllegalArgumentException("All nodes in cluster must be version [" + version
+ "] or newer to start trial with a different type than 'trial'. Attempting to write to " +
"a node with version [" + out.getVersion() + "] with trial type [" + type + "].");
} else if (acknowledge == false) {
throw new IllegalArgumentException("Request must be acknowledged to send to a node with a version " +
"prior to [" + version + "]. Attempting to send request to node with version [" + out.getVersion() + "] " +
"without acknowledgement.");
} else {
super.writeTo(out);
}
}
}
}

View File

@ -14,4 +14,9 @@ class PostStartTrialRequestBuilder extends ActionRequestBuilder<PostStartTrialRe
PostStartTrialRequestBuilder(ElasticsearchClient client, PostStartTrialAction action) {
super(client, action, new PostStartTrialRequest());
}
public PostStartTrialRequestBuilder setAcknowledge(boolean acknowledge) {
request.acknowledge(acknowledge);
return this;
}
}

View File

@ -5,23 +5,33 @@
*/
package org.elasticsearch.license;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
class PostStartTrialResponse extends ActionResponse {
// Nodes Prior to 6.3 did not have NEED_ACKNOWLEDGEMENT as part of status
enum Pre63Status {
UPGRADED_TO_TRIAL,
TRIAL_ALREADY_ACTIVATED;
}
enum Status {
UPGRADED_TO_TRIAL(true, null, RestStatus.OK),
TRIAL_ALREADY_ACTIVATED(false, "Operation failed: Trial was already activated.", RestStatus.FORBIDDEN);
TRIAL_ALREADY_ACTIVATED(false, "Operation failed: Trial was already activated.", RestStatus.FORBIDDEN),
NEED_ACKNOWLEDGEMENT(false, "Operation failed: Needs acknowledgement.", RestStatus.OK);
private final boolean isTrialStarted;
private final String errorMessage;
private final RestStatus restStatus;
Status(boolean isTrialStarted, String errorMessage, RestStatus restStatus) {
this.isTrialStarted = isTrialStarted;
this.errorMessage = errorMessage;
@ -39,15 +49,24 @@ class PostStartTrialResponse extends ActionResponse {
RestStatus getRestStatus() {
return restStatus;
}
}
private Status status;
private Map<String, String[]> acknowledgeMessages;
private String acknowledgeMessage;
PostStartTrialResponse() {
}
PostStartTrialResponse(Status status) {
this(status, Collections.emptyMap(), null);
}
PostStartTrialResponse(Status status, Map<String, String[]> acknowledgeMessages, String acknowledgeMessage) {
this.status = status;
this.acknowledgeMessages = acknowledgeMessages;
this.acknowledgeMessage = acknowledgeMessage;
}
public Status getStatus() {
@ -57,10 +76,58 @@ class PostStartTrialResponse extends ActionResponse {
@Override
public void readFrom(StreamInput in) throws IOException {
status = in.readEnum(Status.class);
// TODO: Change to 6.3 after backport
if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
acknowledgeMessage = in.readOptionalString();
int size = in.readVInt();
Map<String, String[]> acknowledgeMessages = new HashMap<>(size);
for (int i = 0; i < size; i++) {
String feature = in.readString();
int nMessages = in.readVInt();
String[] messages = new String[nMessages];
for (int j = 0; j < nMessages; j++) {
messages[j] = in.readString();
}
acknowledgeMessages.put(feature, messages);
}
this.acknowledgeMessages = acknowledgeMessages;
} else {
this.acknowledgeMessages = Collections.emptyMap();
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeEnum(status);
// TODO: Change to 6.3 after backport
Version version = Version.V_7_0_0_alpha1;
if (out.getVersion().onOrAfter(version)) {
out.writeEnum(status);
out.writeOptionalString(acknowledgeMessage);
out.writeVInt(acknowledgeMessages.size());
for (Map.Entry<String, String[]> entry : acknowledgeMessages.entrySet()) {
out.writeString(entry.getKey());
out.writeVInt(entry.getValue().length);
for (String message : entry.getValue()) {
out.writeString(message);
}
}
} else {
if (status == Status.UPGRADED_TO_TRIAL) {
out.writeEnum(Pre63Status.UPGRADED_TO_TRIAL);
} else if (status == Status.TRIAL_ALREADY_ACTIVATED) {
out.writeEnum(Pre63Status.TRIAL_ALREADY_ACTIVATED);
} else {
throw new IllegalArgumentException("Starting trial on node with version [" + Version.CURRENT + "] requires " +
"acknowledgement parameter.");
}
}
}
Map<String, String[]> getAcknowledgementMessages() {
return acknowledgeMessages;
}
String getAcknowledgementMessage() {
return acknowledgeMessage;
}
}
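Taken together, the response now serializes in the following layout (a comment sketch; the version gate is the `V_7_0_0_alpha1` TODO above):

[source,java]
----
// if stream version >= gate:
//   status (enum), acknowledgeMessage (optional string),
//   vint count, then per feature: name (string), vint n, n x message (string)
// else:
//   a single Pre63Status enum (UPGRADED_TO_TRIAL or TRIAL_ALREADY_ACTIVATED)
----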

View File

@ -29,7 +29,7 @@ public class RestGetTrialStatus extends XPackRestHandler {
@Override
protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException {
return channel -> client.licensing().prepareGetUpgradeToTrial().execute(
return channel -> client.licensing().prepareGetStartTrial().execute(
new RestBuilderListener<GetTrialStatusResponse>(channel) {
@Override
public RestResponse buildResponse(GetTrialStatusResponse response, XContentBuilder builder) throws Exception {

View File

@ -16,6 +16,7 @@ import org.elasticsearch.xpack.core.XPackClient;
import org.elasticsearch.xpack.core.rest.XPackRestHandler;
import java.io.IOException;
import java.util.Map;
import static org.elasticsearch.rest.RestRequest.Method.POST;
@ -30,23 +31,36 @@ public class RestPostStartTrialLicense extends XPackRestHandler {
protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException {
PostStartTrialRequest startTrialRequest = new PostStartTrialRequest();
startTrialRequest.setType(request.param("type", "trial"));
startTrialRequest.acknowledge(request.paramAsBoolean("acknowledge", false));
return channel -> client.licensing().postStartTrial(startTrialRequest,
new RestBuilderListener<PostStartTrialResponse>(channel) {
@Override
public RestResponse buildResponse(PostStartTrialResponse response, XContentBuilder builder) throws Exception {
PostStartTrialResponse.Status status = response.getStatus();
builder.startObject();
builder.field("acknowledged", startTrialRequest.isAcknowledged());
if (status.isTrialStarted()) {
builder.startObject()
.field("trial_was_started", true)
.field("type", startTrialRequest.getType())
.endObject();
builder.field("trial_was_started", true);
builder.field("type", startTrialRequest.getType());
} else {
builder.startObject()
.field("trial_was_started", false)
.field("error_message", status.getErrorMessage())
.endObject();
builder.field("trial_was_started", false);
builder.field("error_message", status.getErrorMessage());
}
Map<String, String[]> acknowledgementMessages = response.getAcknowledgementMessages();
if (acknowledgementMessages.isEmpty() == false) {
builder.startObject("acknowledge");
builder.field("message", response.getAcknowledgementMessage());
for (Map.Entry<String, String[]> entry : acknowledgementMessages.entrySet()) {
builder.startArray(entry.getKey());
for (String message : entry.getValue()) {
builder.value(message);
}
builder.endArray();
}
builder.endObject();
}
builder.endObject();
return new BytesRestResponse(status.getRestStatus(), builder);
}
});

View File

@ -15,10 +15,23 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;
import java.time.Clock;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
public class StartTrialClusterTask extends ClusterStateUpdateTask {
private static final String ACKNOWLEDGEMENT_HEADER = "This API initiates a free 30-day trial for all platinum features. " +
"By starting this trial, you agree that it is subject to the terms and conditions at" +
" https://www.elastic.co/legal/trial_license/. To begin your free trial, call /start_trial again and specify " +
"the \"acknowledge=true\" parameter.";
private static final Map<String, String[]> ACK_MESSAGES = Collections.singletonMap("security",
new String[] {"With a trial license, X-Pack security features are available, but are not enabled by default."});
private final Logger logger;
private final String clusterName;
private final PostStartTrialRequest request;
@ -39,7 +52,10 @@ public class StartTrialClusterTask extends ClusterStateUpdateTask {
LicensesMetaData oldLicensesMetaData = oldState.metaData().custom(LicensesMetaData.TYPE);
logger.debug("started self generated trial license: {}", oldLicensesMetaData);
if (oldLicensesMetaData == null || oldLicensesMetaData.isEligibleForTrial()) {
if (request.isAcknowledged() == false) {
listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.NEED_ACKNOWLEDGEMENT,
ACK_MESSAGES, ACKNOWLEDGEMENT_HEADER));
} else if (oldLicensesMetaData == null || oldLicensesMetaData.isEligibleForTrial()) {
listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.UPGRADED_TO_TRIAL));
} else {
listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.TRIAL_ALREADY_ACTIVATED));
@ -50,7 +66,9 @@ public class StartTrialClusterTask extends ClusterStateUpdateTask {
public ClusterState execute(ClusterState currentState) throws Exception {
LicensesMetaData currentLicensesMetaData = currentState.metaData().custom(LicensesMetaData.TYPE);
if (currentLicensesMetaData == null || currentLicensesMetaData.isEligibleForTrial()) {
if (request.isAcknowledged() == false) {
return currentState;
} else if (currentLicensesMetaData == null || currentLicensesMetaData.isEligibleForTrial()) {
long issueDate = clock.millis();
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
long expiryDate = issueDate + LicenseService.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.getMillis();

View File

@ -56,33 +56,47 @@ public class StartTrialLicenseTests extends AbstractLicensesIntegrationTestCase
assertEquals(200, response.getStatusLine().getStatusCode());
assertEquals("{\"eligible_to_start_trial\":true}", body);
String type = randomFrom(LicenseService.VALID_TRIAL_TYPES);
Response response2 = restClient.performRequest("POST", "/_xpack/license/start_trial?type=" + type);
// Test that starting will fail without acknowledgement
Response response2 = restClient.performRequest("POST", "/_xpack/license/start_trial");
String body2 = Streams.copyToString(new InputStreamReader(response2.getEntity().getContent(), StandardCharsets.UTF_8));
assertEquals(200, response2.getStatusLine().getStatusCode());
assertTrue(body2.contains("\"trial_was_started\":true"));
assertTrue(body2.contains("\"type\":\"" + type + "\""));
assertTrue(body2.contains("\"trial_was_started\":false"));
assertTrue(body2.contains("\"error_message\":\"Operation failed: Needs acknowledgement.\""));
assertTrue(body2.contains("\"acknowledged\":false"));
assertBusy(() -> {
GetLicenseResponse getLicenseResponse = licensingClient.prepareGetLicense().get();
assertEquals("basic", getLicenseResponse.license().type());
});
String type = randomFrom(LicenseService.VALID_TRIAL_TYPES);
Response response3 = restClient.performRequest("POST", "/_xpack/license/start_trial?acknowledge=true&type=" + type);
String body3 = Streams.copyToString(new InputStreamReader(response3.getEntity().getContent(), StandardCharsets.UTF_8));
assertEquals(200, response3.getStatusLine().getStatusCode());
assertTrue(body3.contains("\"trial_was_started\":true"));
assertTrue(body3.contains("\"type\":\"" + type + "\""));
assertTrue(body3.contains("\"acknowledged\":true"));
assertBusy(() -> {
GetLicenseResponse postTrialLicenseResponse = licensingClient.prepareGetLicense().get();
assertEquals(type, postTrialLicenseResponse.license().type());
});
Response response3 = restClient.performRequest("GET", "/_xpack/license/trial_status");
String body3 = Streams.copyToString(new InputStreamReader(response3.getEntity().getContent(), StandardCharsets.UTF_8));
assertEquals(200, response3.getStatusLine().getStatusCode());
assertEquals("{\"eligible_to_start_trial\":false}", body3);
Response response4 = restClient.performRequest("GET", "/_xpack/license/trial_status");
String body4 = Streams.copyToString(new InputStreamReader(response4.getEntity().getContent(), StandardCharsets.UTF_8));
assertEquals(200, response4.getStatusLine().getStatusCode());
assertEquals("{\"eligible_to_start_trial\":false}", body4);
String secondAttemptType = randomFrom(LicenseService.VALID_TRIAL_TYPES);
ResponseException ex = expectThrows(ResponseException.class,
() -> restClient.performRequest("POST", "/_xpack/license/start_trial?type=" + secondAttemptType));
Response response4 = ex.getResponse();
String body4 = Streams.copyToString(new InputStreamReader(response4.getEntity().getContent(), StandardCharsets.UTF_8));
assertEquals(403, response4.getStatusLine().getStatusCode());
assertTrue(body4.contains("\"trial_was_started\":false"));
assertTrue(body4.contains("\"error_message\":\"Operation failed: Trial was already activated.\""));
() -> restClient.performRequest("POST", "/_xpack/license/start_trial?acknowledge=true&type=" + secondAttemptType));
Response response5 = ex.getResponse();
String body5 = Streams.copyToString(new InputStreamReader(response5.getEntity().getContent(), StandardCharsets.UTF_8));
assertEquals(403, response5.getStatusLine().getStatusCode());
assertTrue(body5.contains("\"trial_was_started\":false"));
assertTrue(body5.contains("\"error_message\":\"Operation failed: Trial was already activated.\""));
}
public void testInvalidType() throws Exception {

View File

@ -3,23 +3,6 @@
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
/*
* ELASTICSEARCH CONFIDENTIAL
* __________________
*
* [2017] Elasticsearch Incorporated. All Rights Reserved.
*
* NOTICE: All information contained herein is, and remains
* the property of Elasticsearch Incorporated and its suppliers,
* if any. The intellectual and technical concepts contained
* herein are proprietary to Elasticsearch Incorporated
* and its suppliers and may be covered by U.S. and Foreign Patents,
* patents in process, and are protected by trade secret or copyright law.
* Dissemination of this information or reproduction of this material
* is strictly forbidden unless prior written permission is obtained
* from Elasticsearch Incorporated.
*/
package org.elasticsearch.xpack.security.authc.support.mapper;
import org.elasticsearch.common.ParsingException;

View File

@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.UnaryArithmeticProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.BucketExtractorProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessor;
@ -40,6 +41,7 @@ public final class Processors {
// arithmetic
entries.add(new Entry(Processor.class, BinaryArithmeticProcessor.NAME, BinaryArithmeticProcessor::new));
entries.add(new Entry(Processor.class, UnaryArithmeticProcessor.NAME, UnaryArithmeticProcessor::new));
entries.add(new Entry(Processor.class, BinaryMathProcessor.NAME, BinaryMathProcessor::new));
// datetime
entries.add(new Entry(Processor.class, DateTimeProcessor.NAME, DateTimeProcessor::new));
// math

View File

@ -10,6 +10,7 @@ import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.BinaryExpression;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.ExpressionId;
import org.elasticsearch.xpack.sql.expression.Expressions;
import org.elasticsearch.xpack.sql.expression.FieldAttribute;
import org.elasticsearch.xpack.sql.expression.Literal;
import org.elasticsearch.xpack.sql.expression.NamedExpression;
@ -159,7 +160,7 @@ abstract class QueryTranslator {
}
}
throw new UnsupportedOperationException(format(Locale.ROOT, "Don't know how to translate %s %s", e.nodeName(), e));
throw new SqlIllegalArgumentException("Don't know how to translate {} {}", e.nodeName(), e);
}
static LeafAgg toAgg(String id, Function f) {
@ -171,7 +172,7 @@ abstract class QueryTranslator {
}
}
throw new UnsupportedOperationException(format(Locale.ROOT, "Don't know how to translate %s %s", f.nodeName(), f));
throw new SqlIllegalArgumentException("Don't know how to translate {} {}", f.nodeName(), f);
}
static class GroupingContext {
@ -395,8 +396,8 @@ abstract class QueryTranslator {
if (arg instanceof Literal) {
return String.valueOf(((Literal) arg).value());
}
throw new SqlIllegalArgumentException("Does not know how to convert argument " + arg.nodeString()
+ " for function " + af.nodeString());
throw new SqlIllegalArgumentException("Does not know how to convert argument {} for function {}", arg.nodeString(),
af.nodeString());
}
// TODO: need to optimize on ngram
@ -505,9 +506,9 @@ abstract class QueryTranslator {
@Override
protected QueryTranslation asQuery(BinaryComparison bc, boolean onAggs) {
Check.isTrue(bc.right().foldable(),
"Line %d:%d - Comparisons against variables are not (currently) supported; offender %s in %s",
"Line {}:{}: Comparisons against variables are not (currently) supported; offender [{}] in [{}]",
bc.right().location().getLineNumber(), bc.right().location().getColumnNumber(),
bc.right().nodeName(), bc.nodeName());
Expressions.name(bc.right()), bc.symbol());
if (bc.left() instanceof NamedExpression) {
NamedExpression ne = (NamedExpression) bc.left();
@ -605,8 +606,8 @@ abstract class QueryTranslator {
return new TermQuery(loc, name, value);
}
Check.isTrue(false, "don't know how to translate binary comparison [{}] in [{}]", bc.right().nodeString(), bc);
return null;
throw new SqlIllegalArgumentException("Don't know how to translate binary comparison [{}] in [{}]", bc.right().nodeString(),
bc);
}
}
@ -700,9 +701,8 @@ abstract class QueryTranslator {
return new QueryTranslation(query, aggFilter);
}
else {
throw new UnsupportedOperationException("No idea how to translate " + e);
throw new SqlIllegalArgumentException("No idea how to translate " + e);
}
}
}

View File

@ -153,7 +153,7 @@ public class FieldAttributeTests extends ESTestCase {
public void testStarExpansionExcludesObjectAndUnsupportedTypes() {
LogicalPlan plan = plan("SELECT * FROM test");
List<? extends NamedExpression> list = ((Project) plan).projections();
assertThat(list, hasSize(7));
assertThat(list, hasSize(8));
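// the test mapping now contains a top-level "date" field (added later in this diff),
// so star expansion yields one more projection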
List<String> names = Expressions.names(list);
assertThat(names, not(hasItem("some")));
assertThat(names, not(hasItem("some.dotted")));

View File

@ -15,6 +15,7 @@ import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.TimeZone;
import static java.util.Arrays.asList;
import static java.util.Collections.emptyList;
@ -24,7 +25,7 @@ import static java.util.Collections.singletonMap;
public class CompositeKeyExtractorTests extends AbstractWireSerializingTestCase<CompositeKeyExtractor> {
public static CompositeKeyExtractor randomCompositeKeyExtractor() {
return new CompositeKeyExtractor(randomAlphaOfLength(16), randomFrom(asList(Property.values())), randomTimeZone());
return new CompositeKeyExtractor(randomAlphaOfLength(16), randomFrom(asList(Property.values())), randomSafeTimeZone());
}
@Override
@ -58,7 +59,7 @@ public class CompositeKeyExtractorTests extends AbstractWireSerializingTestCase<
}
public void testExtractDate() {
CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomTimeZone());
CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomSafeTimeZone());
long millis = System.currentTimeMillis();
Bucket bucket = new TestBucket(singletonMap(extractor.key(), millis), randomLong(), new Aggregations(emptyList()));
@ -73,4 +74,13 @@ public class CompositeKeyExtractorTests extends AbstractWireSerializingTestCase<
SqlIllegalArgumentException exception = expectThrows(SqlIllegalArgumentException.class, () -> extractor.extract(bucket));
assertEquals("Invalid date key returned: " + value, exception.getMessage());
}
/**
* We need to exclude SystemV/* time zones because they cannot be converted
* back to DateTimeZone, a conversion we currently still need to perform
* internally, e.g. in bwc serialization and in the extract() method.
*/
private static TimeZone randomSafeTimeZone() {
return randomValueOtherThanMany(tz -> tz.getID().startsWith("SystemV"), () -> randomTimeZone());
}
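// For illustration only (not in the commit): Joda cannot map such an id, e.g.
// DateTimeZone.forTimeZone(TimeZone.getTimeZone("SystemV/AST4")) throws
// IllegalArgumentException because the zone id is not recognised.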
}

View File

@ -3,23 +3,6 @@
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
/*
* ELASTICSEARCH CONFIDENTIAL
* __________________
*
* [2017] Elasticsearch Incorporated. All Rights Reserved.
*
* NOTICE: All information contained herein is, and remains
* the property of Elasticsearch Incorporated and its suppliers,
* if any. The intellectual and technical concepts contained
* herein are proprietary to Elasticsearch Incorporated
* and its suppliers and may be covered by U.S. and Foreign Patents,
* patents in process, and are protected by trade secret or copyright law.
* Dissemination of this information or reproduction of this material
* is strictly forbidden unless prior written permission is obtained
* from Elasticsearch Incorporated.
*/
package org.elasticsearch.xpack.sql.expression;
import org.elasticsearch.xpack.sql.tree.AbstractNodeTestCase;

View File

@ -3,23 +3,6 @@
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
/*
* ELASTICSEARCH CONFIDENTIAL
* __________________
*
* [2017] Elasticsearch Incorporated. All Rights Reserved.
*
* NOTICE: All information contained herein is, and remains
* the property of Elasticsearch Incorporated and its suppliers,
* if any. The intellectual and technical concepts contained
* herein are proprietary to Elasticsearch Incorporated
* and its suppliers and may be covered by U.S. and Foreign Patents,
* patents in process, and are protected by trade secret or copyright law.
* Dissemination of this information or reproduction of this material
* is strictly forbidden unless prior written permission is obtained
* from Elasticsearch Incorporated.
*/
package org.elasticsearch.xpack.sql.expression.function;
import org.elasticsearch.xpack.sql.expression.Expression;

View File

@ -0,0 +1,59 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.expression.function.scalar.math;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import org.elasticsearch.xpack.sql.expression.Literal;
import org.elasticsearch.xpack.sql.expression.function.scalar.Processors;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor;
import static org.elasticsearch.xpack.sql.tree.Location.EMPTY;
public class BinaryMathProcessorTests extends AbstractWireSerializingTestCase<BinaryMathProcessor> {
public static BinaryMathProcessor randomProcessor() {
return new BinaryMathProcessor(
new ConstantProcessor(randomLong()),
new ConstantProcessor(randomLong()),
randomFrom(BinaryMathProcessor.BinaryMathOperation.values()));
}
@Override
protected BinaryMathProcessor createTestInstance() {
return randomProcessor();
}
@Override
protected Reader<BinaryMathProcessor> instanceReader() {
return BinaryMathProcessor::new;
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(Processors.getNamedWriteables());
}
public void testAtan2() {
Processor ba = new ATan2(EMPTY, l(1), l(1)).makeProcessorDefinition().asProcessor();
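// atan2(1, 1) == PI / 4 == 0.7853981633974483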
assertEquals(0.7853981633974483d, ba.process(null));
}
public void testPower() {
Processor ba = new Power(EMPTY, l(2), l(2)).makeProcessorDefinition().asProcessor();
assertEquals(4d, ba.process(null));
}
public void testHandleNull() {
assertNull(new ATan2(EMPTY, l(null), l(3)).makeProcessorDefinition().asProcessor().process(null));
assertNull(new Power(EMPTY, l(null), l(null)).makeProcessorDefinition().asProcessor().process(null));
}
private static Literal l(Object value) {
return Literal.of(EMPTY, value);
}
}

View File

@ -17,7 +17,7 @@ public class SysColumnsTests extends ESTestCase {
public void testSysColumns() {
List<List<?>> rows = new ArrayList<>();
SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null);
assertEquals(15, rows.size());
assertEquals(16, rows.size());
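// one row more than before: mapping-multi-field-variation.json now contains a "date" field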
assertEquals(24, rows.get(0).size());
List<?> row = rows.get(0);
@ -38,13 +38,13 @@ public class SysColumnsTests extends ESTestCase {
assertEquals(null, radix(row));
assertEquals(Integer.MAX_VALUE, bufferLength(row));
row = rows.get(6);
row = rows.get(7);
assertEquals("some.dotted", name(row));
assertEquals(Types.STRUCT, sqlType(row));
assertEquals(null, radix(row));
assertEquals(-1, bufferLength(row));
row = rows.get(14);
row = rows.get(15);
assertEquals("some.ambiguous.normalized", name(row));
assertEquals(Types.VARCHAR, sqlType(row));
assertEquals(null, radix(row));

View File

@ -6,6 +6,7 @@
package org.elasticsearch.xpack.sql.planner;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer;
import org.elasticsearch.xpack.sql.analysis.index.EsIndex;
import org.elasticsearch.xpack.sql.analysis.index.IndexResolution;
@ -18,9 +19,11 @@ import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plan.logical.Project;
import org.elasticsearch.xpack.sql.planner.QueryTranslator.QueryTranslation;
import org.elasticsearch.xpack.sql.querydsl.query.Query;
import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery;
import org.elasticsearch.xpack.sql.querydsl.query.TermQuery;
import org.elasticsearch.xpack.sql.type.EsField;
import org.elasticsearch.xpack.sql.type.TypesTests;
import org.joda.time.DateTime;
import java.util.Map;
import java.util.TimeZone;
@ -84,4 +87,56 @@ public class QueryTranslatorTests extends ESTestCase {
assertEquals("int", tq.term());
assertEquals(5, tq.value());
}
public void testComparisonAgainstColumns() {
LogicalPlan p = plan("SELECT some.string FROM test WHERE date > int");
assertTrue(p instanceof Project);
p = ((Project) p).child();
assertTrue(p instanceof Filter);
Expression condition = ((Filter) p).condition();
SqlIllegalArgumentException ex = expectThrows(SqlIllegalArgumentException.class, () -> QueryTranslator.toQuery(condition, false));
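// column 43 is where the offending right-hand side ("int") starts in the SQL text above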
assertEquals("Line 1:43: Comparisons against variables are not (currently) supported; offender [int] in [>]", ex.getMessage());
}
public void testDateRange() {
LogicalPlan p = plan("SELECT some.string FROM test WHERE date > 1969-05-13");
assertTrue(p instanceof Project);
p = ((Project) p).child();
assertTrue(p instanceof Filter);
Expression condition = ((Filter) p).condition();
QueryTranslation translation = QueryTranslator.toQuery(condition, false);
Query query = translation.query;
assertTrue(query instanceof RangeQuery);
RangeQuery rq = (RangeQuery) query;
assertEquals("date", rq.field());
assertEquals(1951, rq.lower());
}
public void testDateRangeLiteral() {
LogicalPlan p = plan("SELECT some.string FROM test WHERE date > '1969-05-13'");
assertTrue(p instanceof Project);
p = ((Project) p).child();
assertTrue(p instanceof Filter);
Expression condition = ((Filter) p).condition();
QueryTranslation translation = QueryTranslator.toQuery(condition, false);
Query query = translation.query;
assertTrue(query instanceof RangeQuery);
RangeQuery rq = (RangeQuery) query;
assertEquals("date", rq.field());
assertEquals("1969-05-13", rq.lower());
}
public void testDateRangeCast() {
LogicalPlan p = plan("SELECT some.string FROM test WHERE date > CAST('1969-05-13T12:34:56Z' AS DATE)");
assertTrue(p instanceof Project);
p = ((Project) p).child();
assertTrue(p instanceof Filter);
Expression condition = ((Filter) p).condition();
QueryTranslation translation = QueryTranslator.toQuery(condition, false);
Query query = translation.query;
assertTrue(query instanceof RangeQuery);
RangeQuery rq = (RangeQuery) query;
assertEquals("date", rq.field());
assertEquals(DateTime.parse("1969-05-13T12:34:56Z"), rq.lower());
}
}

View File

@ -4,6 +4,7 @@
"int" : { "type" : "integer" },
"text" : { "type" : "text" },
"keyword" : { "type" : "keyword" },
"date" : { "type" : "date" },
"unsupported" : { "type" : "ip_range" },
"some" : {
"properties" : {

View File

@ -11,6 +11,10 @@
"type": {
"type" : "string",
"description" : "The type of trial license to generate (default: \"trial\")"
},
"acknowledge": {
"type" : "boolean",
"description" : "whether the user has acknowledged acknowledge messages (default: false)"
}
}
},

View File

@ -133,7 +133,8 @@ teardown:
- do:
catch: forbidden
xpack.license.post_start_trial: {}
xpack.license.post_start_trial:
acknowledge: true
- match: { trial_was_started: false }
- match: { error_message: "Operation failed: Trial was already activated." }
@ -143,6 +144,7 @@ teardown:
catch: bad_request
xpack.license.post_start_trial:
type: "basic"
acknowledge: true
---
"Can start basic license if do not already have basic":
- do:

View File

@ -63,7 +63,7 @@ public class ExecutableEmailAction extends ExecutableAction<EmailAction> {
}
Email.Builder email = action.getEmail().render(templateEngine, model, htmlSanitizer, attachments);
email.id(ctx.id().value());
email.id(actionId + "_" + ctx.id().value());
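// the action id prefix keeps message ids unique when a single watch runs several email actions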
if (ctx.simulateAction(actionId)) {
return new EmailAction.Result.Simulated(email.build());

View File

@ -354,7 +354,7 @@ public class Email implements ToXContentObject {
* after this is called is incorrect.
*/
public Email build() {
assert id != null : "email id should not be null (should be set to the watch id";
assert id != null : "email id should not be null";
Email email = new Email(id, from, replyTo, priority, sentDate, to, cc, bcc, subject, textBody, htmlBody,
unmodifiableMap(attachments));
attachments = null;

View File

@ -171,7 +171,7 @@ public class EmailActionTests extends ESTestCase {
assertThat(result, instanceOf(EmailAction.Result.Success.class));
assertThat(((EmailAction.Result.Success) result).account(), equalTo(account));
Email actualEmail = ((EmailAction.Result.Success) result).email();
assertThat(actualEmail.id(), is(wid.value()));
assertThat(actualEmail.id(), is("_id_" + wid.value()));
assertThat(actualEmail, notNullValue());
assertThat(actualEmail.subject(), is(subject == null ? null : subject.getTemplate()));
assertThat(actualEmail.textBody(), is(textBody == null ? null : textBody.getTemplate()));

View File

@ -0,0 +1,88 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.watcher.actions.email;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext;
import org.elasticsearch.xpack.core.watcher.watch.Payload;
import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine;
import org.elasticsearch.xpack.watcher.notification.email.EmailService;
import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate;
import org.elasticsearch.xpack.watcher.notification.email.HtmlSanitizer;
import org.elasticsearch.xpack.watcher.notification.email.support.EmailServer;
import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine;
import org.elasticsearch.xpack.watcher.test.WatcherTestUtils;
import org.junit.After;
import org.junit.Before;
import javax.mail.internet.MimeMessage;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.hamcrest.Matchers.hasSize;
public class EmailMessageIdTests extends ESTestCase {
private EmailServer server;
private TextTemplateEngine textTemplateEngine = new MockTextTemplateEngine();
private HtmlSanitizer htmlSanitizer = new HtmlSanitizer(Settings.EMPTY);
private EmailService emailService;
private EmailAction emailAction;
@Before
public void startSmtpServer() {
server = EmailServer.localhost(logger);
Settings settings = Settings.builder()
.put("xpack.notification.email.account.test.smtp.auth", true)
.put("xpack.notification.email.account.test.smtp.user", EmailServer.USERNAME)
.put("xpack.notification.email.account.test.smtp.password", EmailServer.PASSWORD)
.put("xpack.notification.email.account.test.smtp.port", server.port())
.put("xpack.notification.email.account.test.smtp.host", "localhost")
.build();
Set<Setting<?>> registeredSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
registeredSettings.addAll(EmailService.getSettings());
ClusterSettings clusterSettings = new ClusterSettings(settings, registeredSettings);
emailService = new EmailService(settings, null, clusterSettings);
EmailTemplate emailTemplate = EmailTemplate.builder().from("from@example.org").to("to@example.org")
.subject("subject").textBody("body").build();
emailAction = new EmailAction(emailTemplate, null, null, null, null, null);
}
@After
public void stopSmtpServer() {
server.stop();
}
public void testThatMessageIdIsUnique() throws Exception {
List<MimeMessage> messages = new ArrayList<>();
server.addListener(messages::add);
ExecutableEmailAction firstEmailAction = new ExecutableEmailAction(emailAction, logger, emailService, textTemplateEngine,
htmlSanitizer, Collections.emptyMap());
ExecutableEmailAction secondEmailAction = new ExecutableEmailAction(emailAction, logger, emailService, textTemplateEngine,
htmlSanitizer, Collections.emptyMap());
WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger);
firstEmailAction.execute("my_first_action_id", ctx, Payload.EMPTY);
secondEmailAction.execute("my_second_action_id", ctx, Payload.EMPTY);
assertThat(messages, hasSize(2));
// check for unique message ids, should be two as well
Set<String> messageIds = new HashSet<>();
for (MimeMessage message : messages) {
messageIds.add(message.getMessageID());
}
assertThat(messageIds, hasSize(2));
}
}

View File

@ -114,7 +114,7 @@ subprojects {
approvedLicenses << 'Apache'
}
String outputDir = "generated-resources/${project.name}"
String outputDir = "${buildDir}/generated-resources/${project.name}"
// This is a top level task which we will add dependencies to below.
// It is a single task that can be used to backcompat tests against all versions.
@ -123,7 +123,7 @@ subprojects {
group = 'verification'
}
String output = "generated-resources/${project.name}"
String output = "${buildDir}/generated-resources/${project.name}"
task copyTestNodeKeystore(type: Copy) {
from project(xpackModule('core'))
.file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks')

View File

@ -71,7 +71,7 @@ task bwcTest {
group = 'verification'
}
String outputDir = "generated-resources/${project.name}"
String outputDir = "${buildDir}/generated-resources/${project.name}"
for (Version version : bwcVersions.wireCompatible) {
String baseName = "v${version}"

View File

@ -96,7 +96,7 @@ subprojects {
}
}
String outputDir = "generated-resources/${project.name}"
String outputDir = "${buildDir}/generated-resources/${project.name}"
// This is a top level task which we will add dependencies to below.
// It is a single task that can be used to backcompat tests against all versions.
@ -105,7 +105,7 @@ subprojects {
group = 'verification'
}
String output = "generated-resources/${project.name}"
String output = "${buildDir}/generated-resources/${project.name}"
task copyTestNodeKeystore(type: Copy) {
from project(xpackModule('core'))
.file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks')

View File

@ -39,34 +39,6 @@ public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa
XPackRestTestHelper.waitForMlTemplates(client());
}
/**
* Enables an HTTP exporter for monitoring so that we can test the production-level exporter (not the local exporter).
*
* The build.gradle file disables data collection, so the expectation is that any monitoring rest tests will use the
* "_xpack/monitoring/_bulk" endpoint to lazily setup the templates on-demand and fill in data without worrying about
* timing.
*/
@Before
public void waitForMonitoring() throws Exception {
final String[] nodes = System.getProperty("tests.rest.cluster").split(",");
final Map<String, Object> settings = new HashMap<>();
settings.put("xpack.monitoring.exporters._http.enabled", true);
// only select the last node to avoid getting the "old" node in a mixed cluster
// if we ever randomize the order that the nodes are restarted (or add more nodes), then we need to verify which node we select
settings.put("xpack.monitoring.exporters._http.host", nodes[nodes.length - 1]);
assertBusy(() -> {
final ClientYamlTestResponse response =
getAdminExecutionContext().callApi("cluster.put_settings",
emptyMap(),
singletonList(singletonMap("transient", settings)),
emptyMap());
assertThat(response.evaluate("acknowledged"), is(true));
});
}
@Override
protected boolean preserveIndicesUponCompletion() {
return true;

View File

@ -1,48 +0,0 @@
---
setup:
- do:
cluster.health:
wait_for_status: yellow
---
"Index monitoring data and search on the mixed cluster":
- skip:
version: "all"
reason: "AwaitsFix'ing, see x-pack-elasticsearch #2948"
- do:
search:
index: .monitoring-kibana-*
body: { "query": { "term" : { "type": "old_cluster" } } }
- match: { hits.total: 2 }
- do:
xpack.monitoring.bulk:
system_id: "kibana"
system_api_version: "6"
interval: "123456ms"
type: "mixed_cluster"
body:
- '{"index": {}}'
- '{"field": "value_3"}'
- '{"index": {}}'
- '{"field": "value_4"}'
- '{"index": {}}'
- '{"field": "value_5"}'
- is_false: errors
- do:
indices.refresh: {}
- do:
search:
index: .monitoring-kibana-*
body: { "query": { "term" : { "type": "old_cluster" } } }
- match: { hits.total: 2 }
- do:
search:
index: .monitoring-kibana-*
body: { "query": { "term" : { "type": "mixed_cluster" } } }
- match: { hits.total: 3 }

View File

@ -1,33 +0,0 @@
---
setup:
- do:
cluster.health:
wait_for_status: yellow
---
"Index monitoring data and search on the old cluster":
- skip:
version: "all"
reason: "AwaitsFix'ing, see x-pack-elasticsearch #2948"
- do:
xpack.monitoring.bulk:
system_id: "kibana"
system_api_version: "6"
interval: "123456ms"
type: "old_cluster"
body:
- '{"index": {}}'
- '{"field": "value_1"}'
- '{"index": {}}'
- '{"field": "value_2"}'
- is_false: errors
- do:
indices.refresh: {}
- do:
search:
index: .monitoring-kibana-*
body: { "query": { "term" : { "type": "old_cluster" } } }
- match: { hits.total: 2 }

View File

@ -1,54 +0,0 @@
---
setup:
- do:
cluster.health:
wait_for_status: yellow
---
"Index monitoring data and search on the upgraded cluster":
- skip:
version: "all"
reason: "AwaitsFix'ing, see x-pack-elasticsearch #2948"
- do:
search:
index: .monitoring-kibana-*
body: { "query": { "term" : { "type": "old_cluster" } } }
- match: { hits.total: 2 }
- do:
search:
index: .monitoring-kibana-*
body: { "query": { "term" : { "type": "mixed_cluster" } } }
- match: { hits.total: 3 }
- do:
xpack.monitoring.bulk:
system_id: "kibana"
system_api_version: "6"
interval: "123456ms"
type: "upgraded_cluster"
body:
- '{"index": {}}'
- '{"field": "value_6"}'
- '{"index": {}}'
- '{"field": "value_7"}'
- '{"index": {}}'
- '{"field": "value_8"}'
- is_false: errors
- do:
indices.refresh: {}
- do:
search:
index: .monitoring-kibana-*
body: { "query": { "terms" : { "type": [ "old_cluster", "mixed_cluster" ] } } }
- match: { hits.total: 5 }
- do:
search:
index: .monitoring-kibana-*
body: { "query": { "term" : { "type": "upgraded_cluster" } } }
- match: { hits.total: 3 }

View File

@ -6,7 +6,7 @@ dependencies {
testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime')
}
String outputDir = "generated-resources/${project.name}"
String outputDir = "${buildDir}/generated-resources/${project.name}"
task copyXPackPluginProps(type: Copy) {
from project(xpackModule('core')).file('src/main/plugin-metadata')
from project(xpackModule('core')).tasks.pluginProperties

View File

@ -17,7 +17,7 @@ dependencies {
testCompile project(path: xpackModule('core'), configuration: 'runtime')
}
String outputDir = "generated-resources/${project.name}"
String outputDir = "${buildDir}/generated-resources/${project.name}"
task copyXPackPluginProps(type: Copy) {
from project(xpackModule('core')).file('src/main/plugin-metadata')
from project(xpackModule('core')).tasks.pluginProperties

View File

@ -19,6 +19,7 @@ load $BATS_UTILS/xpack.bash
setup() {
skip_not_tar_gz
export ESHOME=/tmp/elasticsearch
export PACKAGE_NAME="elasticsearch"
export_elasticsearch_paths
export ESPLUGIN_COMMAND_USER=elasticsearch
}