Merge remote-tracking branch 'es/master' into ccr
commit 2e41f0dd48
@@ -123,12 +123,19 @@ class BuildPlugin implements Plugin<Project> {
            }

            println " Random Testing Seed : ${project.testSeed}"

            // enforce gradle version
            GradleVersion minGradle = GradleVersion.version('3.3')
            if (GradleVersion.current() < minGradle) {
            // enforce Gradle version
            final GradleVersion currentGradleVersion = GradleVersion.current();

            final GradleVersion minGradle = GradleVersion.version('3.3')
            if (currentGradleVersion < minGradle) {
                throw new GradleException("${minGradle} or above is required to build elasticsearch")
            }

            final GradleVersion maxGradle = GradleVersion.version('4.2')
            if (currentGradleVersion >= maxGradle) {
                throw new GradleException("${maxGradle} or above is not compatible with the elasticsearch build")
            }

            // enforce Java version
            if (javaVersionEnum < minimumJava) {
                throw new GradleException("Java ${minimumJava} or above is required to build Elasticsearch")
@@ -0,0 +1,63 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.apache.http.Header;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;

import java.io.IOException;
import java.util.Collections;

/**
 * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Indices API.
 *
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices.html">Indices API on elastic.co</a>
 */
public final class IndicesClient {
    private final RestHighLevelClient restHighLevelClient;

    public IndicesClient(RestHighLevelClient restHighLevelClient) {
        this.restHighLevelClient = restHighLevelClient;
    }

    /**
     * Deletes an index using the Delete Index API
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html">
     * Delete Index API on elastic.co</a>
     */
    public DeleteIndexResponse deleteIndex(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
            Collections.emptySet(), headers);
    }

    /**
     * Asynchronously deletes an index using the Delete Index API
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html">
     * Delete Index API on elastic.co</a>
     */
    public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener, Header... headers) {
        restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
            listener, Collections.emptySet(), headers);
    }
}
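For orientation, a minimal usage sketch of the new IndicesClient (not part of the commit). It assumes a node reachable on localhost:9200 and an existing index named "posts"; the constructor taking a RestClientBuilder is the one documented in the RestHighLevelClient changes below.

import org.apache.http.HttpHost;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class DeleteIndexSketch {
    public static void main(String[] args) throws Exception {
        // The high level client wraps the low level REST client (localhost:9200 is a placeholder).
        try (RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(new HttpHost("localhost", 9200)))) {
            // indices() returns the IndicesClient added above; deleteIndex is the blocking variant.
            DeleteIndexRequest request = new DeleteIndexRequest("posts");
            DeleteIndexResponse response = client.indices().deleteIndex(request);
            System.out.println("acknowledged: " + response.isAcknowledged());
        }
    }
}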
client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java (16 changes, Normal file → Executable file)
@@ -29,6 +29,7 @@ import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;

@@ -123,6 +124,17 @@ public final class Request {
        return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
    }

    static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) {
        String endpoint = endpoint(deleteIndexRequest.indices(), Strings.EMPTY_ARRAY, "");

        Params parameters = Params.builder();
        parameters.withTimeout(deleteIndexRequest.timeout());
        parameters.withMasterTimeout(deleteIndexRequest.masterNodeTimeout());
        parameters.withIndicesOptions(deleteIndexRequest.indicesOptions());

        return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
    }

    static Request info() {
        return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
    }

@@ -449,6 +461,10 @@ public final class Request {
            return this;
        }

        Params withMasterTimeout(TimeValue masterTimeout) {
            return putParam("master_timeout", masterTimeout);
        }

        Params withParent(String parent) {
            return putParam("parent", parent);
        }
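As a sketch of what the new helper produces (not part of the commit; the class lives in the same package because deleteIndex is package private, and the index names and timeouts are placeholders). The expected endpoint, method and parameters match the testDeleteIndex assertions in RequestTests further below.

package org.elasticsearch.client;

import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;

public class DeleteIndexRequestConversionSketch {
    public static void main(String[] args) {
        DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest("logs-2017", "metrics-2017");
        deleteIndexRequest.timeout("45s");           // surfaces as the "timeout" query parameter
        deleteIndexRequest.masterNodeTimeout("30s"); // surfaces as the "master_timeout" query parameter

        Request request = Request.deleteIndex(deleteIndexRequest);
        System.out.println(request.getMethod());     // DELETE
        System.out.println(request.getEndpoint());   // /logs-2017,metrics-2017
        System.out.println(request.getParameters()); // timeout, master_timeout plus the indices options flags
        System.out.println(request.getEntity());     // null, the request has no body
    }
}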
client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java (27 changes, Normal file → Executable file)
@@ -176,6 +176,8 @@ public class RestHighLevelClient implements Closeable {
    private final NamedXContentRegistry registry;
    private final CheckedConsumer<RestClient, IOException> doClose;

    private final IndicesClient indicesClient = new IndicesClient(this);

    /**
     * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the
     * {@link RestClient} to be used to perform requests.

@@ -220,6 +222,15 @@ public class RestHighLevelClient implements Closeable {
        doClose.accept(client);
    }

    /**
     * Provides an {@link IndicesClient} which can be used to access the Indices API.
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices.html">Indices API on elastic.co</a>
     */
    public IndicesClient indices() {
        return indicesClient;
    }

    /**
     * Executes a bulk request using the Bulk API
     *

@@ -327,7 +338,7 @@ public class RestHighLevelClient implements Closeable {
    }

    /**
     * Deletes a document by id using the Delete api
     * Deletes a document by id using the Delete API
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
     */

@@ -337,7 +348,7 @@ public class RestHighLevelClient implements Closeable {
    }

    /**
     * Asynchronously deletes a document by id using the Delete api
     * Asynchronously deletes a document by id using the Delete API
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
     */

@@ -347,7 +358,7 @@ public class RestHighLevelClient implements Closeable {
    }

    /**
     * Executes a search using the Search api
     * Executes a search using the Search API
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
     */

@@ -356,7 +367,7 @@ public class RestHighLevelClient implements Closeable {
    }

    /**
     * Asynchronously executes a search using the Search api
     * Asynchronously executes a search using the Search API
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
     */

@@ -365,7 +376,7 @@ public class RestHighLevelClient implements Closeable {
    }

    /**
     * Executes a search using the Search Scroll api
     * Executes a search using the Search Scroll API
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html">Search Scroll
     * API on elastic.co</a>

@@ -375,7 +386,7 @@ public class RestHighLevelClient implements Closeable {
    }

    /**
     * Asynchronously executes a search using the Search Scroll api
     * Asynchronously executes a search using the Search Scroll API
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html">Search Scroll
     * API on elastic.co</a>

@@ -386,7 +397,7 @@ public class RestHighLevelClient implements Closeable {
    }

    /**
     * Clears one or more scroll ids using the Clear Scroll api
     * Clears one or more scroll ids using the Clear Scroll API
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api">
     * Clear Scroll API on elastic.co</a>

@@ -397,7 +408,7 @@ public class RestHighLevelClient implements Closeable {
    }

    /**
     * Asynchronously clears one or more scroll ids using the Clear Scroll api
     * Asynchronously clears one or more scroll ids using the Clear Scroll API
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api">
     * Clear Scroll API on elastic.co</a>
@@ -39,7 +39,6 @@ import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -50,7 +49,6 @@ import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.Collections;

@@ -614,14 +612,14 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
            }
        };

        ThreadPool threadPool = new ThreadPool(Settings.builder().put("node.name", getClass().getName()).build());
        // Pull the client to a variable to work around https://bugs.eclipse.org/bugs/show_bug.cgi?id=514884
        RestHighLevelClient hlClient = highLevelClient();
        try(BulkProcessor processor = new BulkProcessor.Builder(hlClient::bulkAsync, listener, threadPool)
                .setConcurrentRequests(0)
                .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.GB))
                .setBulkActions(nbItems + 1)
                .build()) {

        try (BulkProcessor processor = BulkProcessor.builder(hlClient::bulkAsync, listener)
                .setConcurrentRequests(0)
                .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.GB))
                .setBulkActions(nbItems + 1)
                .build()) {
            for (int i = 0; i < nbItems; i++) {
                String id = String.valueOf(i);
                boolean erroneous = randomBoolean();

@@ -631,7 +629,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
                if (opType == DocWriteRequest.OpType.DELETE) {
                    if (erroneous == false) {
                        assertEquals(RestStatus.CREATED,
                            highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status());
                            highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status());
                    }
                    DeleteRequest deleteRequest = new DeleteRequest("index", "test", id);
                    processor.add(deleteRequest);

@@ -653,10 +651,10 @@ public class CrudIT extends ESRestHighLevelClientTestCase {

                } else if (opType == DocWriteRequest.OpType.UPDATE) {
                    UpdateRequest updateRequest = new UpdateRequest("index", "test", id)
                        .doc(new IndexRequest().source(xContentType, "id", i));
                        .doc(new IndexRequest().source(xContentType, "id", i));
                    if (erroneous == false) {
                        assertEquals(RestStatus.CREATED,
                            highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status());
                            highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status());
                    }
                    processor.add(updateRequest);
                }

@@ -676,8 +674,6 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
        assertNull(error.get());

        validateBulkResponses(nbItems, errors, bulkResponse, bulkRequest);

        terminate(threadPool);
    }

    private void validateBulkResponses(int nbItems, boolean[] errors, BulkResponse bulkResponse, BulkRequest bulkRequest) {
@@ -0,0 +1,68 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

public class IndicesClientIT extends ESRestHighLevelClientTestCase {

    public void testDeleteIndex() throws IOException {
        {
            // Delete index if exists
            String indexName = "test_index";
            createIndex(indexName);

            DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName);
            DeleteIndexResponse deleteIndexResponse =
                execute(deleteIndexRequest, highLevelClient().indices()::deleteIndex, highLevelClient().indices()::deleteIndexAsync);
            assertTrue(deleteIndexResponse.isAcknowledged());

            assertFalse(indexExists(indexName));
        }
        {
            // Return 404 if index doesn't exist
            String nonExistentIndex = "non_existent_index";
            assertFalse(indexExists(nonExistentIndex));

            DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(nonExistentIndex);

            ElasticsearchException exception = expectThrows(ElasticsearchException.class,
                () -> execute(deleteIndexRequest, highLevelClient().indices()::deleteIndex, highLevelClient().indices()::deleteIndexAsync));
            assertEquals(RestStatus.NOT_FOUND, exception.status());
        }
    }

    private static void createIndex(String index) throws IOException {
        Response response = client().performRequest("PUT", index);

        assertEquals(200, response.getStatusLine().getStatusCode());
    }

    private static boolean indexExists(String index) throws IOException {
        Response response = client().performRequest("HEAD", index);

        return response.getStatusLine().getStatusCode() == 200;
    }
}
client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java (84 changes, Normal file → Executable file)
@@ -25,6 +25,7 @@ import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteRequest;

@@ -36,6 +37,8 @@ import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;

@@ -44,6 +47,7 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;

@@ -74,6 +78,7 @@ import java.util.Map;
import java.util.StringJoiner;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;

import static java.util.Collections.singletonMap;
import static org.elasticsearch.client.Request.enforceSameContentType;

@@ -139,7 +144,7 @@ public class RequestTests extends ESTestCase {

        Map<String, String> expectedParams = new HashMap<>();

        setRandomTimeout(deleteRequest, expectedParams);
        setRandomTimeout(deleteRequest::timeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams);
        setRandomRefreshPolicy(deleteRequest, expectedParams);
        setRandomVersion(deleteRequest, expectedParams);
        setRandomVersionType(deleteRequest, expectedParams);

@@ -240,6 +245,30 @@
        assertEquals(method, request.getMethod());
    }

    public void testDeleteIndex() throws IOException {
        DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest();

        int numIndices = randomIntBetween(0, 5);
        String[] indices = new String[numIndices];
        for (int i = 0; i < numIndices; i++) {
            indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5);
        }
        deleteIndexRequest.indices(indices);

        Map<String, String> expectedParams = new HashMap<>();

        setRandomTimeout(deleteIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
        setRandomMasterTimeout(deleteIndexRequest, expectedParams);

        setRandomIndicesOptions(deleteIndexRequest::indicesOptions, deleteIndexRequest::indicesOptions, expectedParams);

        Request request = Request.deleteIndex(deleteIndexRequest);
        assertEquals("/" + String.join(",", indices), request.getEndpoint());
        assertEquals(expectedParams, request.getParameters());
        assertEquals("DELETE", request.getMethod());
        assertNull(request.getEntity());
    }

    public void testIndex() throws IOException {
        String index = randomAlphaOfLengthBetween(3, 10);
        String type = randomAlphaOfLengthBetween(3, 10);

@@ -258,7 +287,7 @@
            }
        }

        setRandomTimeout(indexRequest, expectedParams);
        setRandomTimeout(indexRequest::timeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams);
        setRandomRefreshPolicy(indexRequest, expectedParams);

        // There is some logic around _create endpoint and version/version type

@@ -678,20 +707,7 @@
            expectedParams.put("scroll", searchRequest.scroll().keepAlive().getStringRep());
        }

        if (randomBoolean()) {
            searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
        }
        expectedParams.put("ignore_unavailable", Boolean.toString(searchRequest.indicesOptions().ignoreUnavailable()));
        expectedParams.put("allow_no_indices", Boolean.toString(searchRequest.indicesOptions().allowNoIndices()));
        if (searchRequest.indicesOptions().expandWildcardsOpen() && searchRequest.indicesOptions().expandWildcardsClosed()) {
            expectedParams.put("expand_wildcards", "open,closed");
        } else if (searchRequest.indicesOptions().expandWildcardsOpen()) {
            expectedParams.put("expand_wildcards", "open");
        } else if (searchRequest.indicesOptions().expandWildcardsClosed()) {
            expectedParams.put("expand_wildcards", "closed");
        } else {
            expectedParams.put("expand_wildcards", "none");
        }
        setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams);

        SearchSourceBuilder searchSourceBuilder = null;
        if (frequently()) {

@@ -903,13 +919,43 @@
        }
    }

    private static void setRandomTimeout(ReplicationRequest<?> request, Map<String, String> expectedParams) {
    private static void setRandomIndicesOptions(Consumer<IndicesOptions> setter, Supplier<IndicesOptions> getter,
                                                Map<String, String> expectedParams) {

        if (randomBoolean()) {
            setter.accept(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(),
                randomBoolean()));
        }
        expectedParams.put("ignore_unavailable", Boolean.toString(getter.get().ignoreUnavailable()));
        expectedParams.put("allow_no_indices", Boolean.toString(getter.get().allowNoIndices()));
        if (getter.get().expandWildcardsOpen() && getter.get().expandWildcardsClosed()) {
            expectedParams.put("expand_wildcards", "open,closed");
        } else if (getter.get().expandWildcardsOpen()) {
            expectedParams.put("expand_wildcards", "open");
        } else if (getter.get().expandWildcardsClosed()) {
            expectedParams.put("expand_wildcards", "closed");
        } else {
            expectedParams.put("expand_wildcards", "none");
        }
    }

    private static void setRandomTimeout(Consumer<String> setter, TimeValue defaultTimeout, Map<String, String> expectedParams) {
        if (randomBoolean()) {
            String timeout = randomTimeValue();
            request.timeout(timeout);
            setter.accept(timeout);
            expectedParams.put("timeout", timeout);
        } else {
            expectedParams.put("timeout", ReplicationRequest.DEFAULT_TIMEOUT.getStringRep());
            expectedParams.put("timeout", defaultTimeout.getStringRep());
        }
    }

    private static void setRandomMasterTimeout(MasterNodeRequest<?> request, Map<String, String> expectedParams) {
        if (randomBoolean()) {
            String masterTimeout = randomTimeValue();
            request.masterNodeTimeout(masterTimeout);
            expectedParams.put("master_timeout", masterTimeout);
        } else {
            expectedParams.put("master_timeout", MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT.getStringRep());
        }
    }
@@ -19,13 +19,11 @@

package org.elasticsearch.client.documentation;

import org.elasticsearch.Build;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;

@@ -40,7 +38,6 @@ import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.replication.ReplicationResponse;

@@ -49,9 +46,7 @@ import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

@@ -64,7 +59,7 @@ import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.Scheduler;

import java.io.IOException;
import java.util.Collections;

@@ -868,31 +863,27 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
    }

    public void testBulkProcessor() throws InterruptedException, IOException {
        Settings settings = Settings.builder().put("node.name", "my-application").build();
        RestHighLevelClient client = highLevelClient();
        {
            // tag::bulk-processor-init
            ThreadPool threadPool = new ThreadPool(settings); // <1>

            BulkProcessor.Listener listener = new BulkProcessor.Listener() { // <2>
            BulkProcessor.Listener listener = new BulkProcessor.Listener() { // <1>
                @Override
                public void beforeBulk(long executionId, BulkRequest request) {
                    // <3>
                    // <2>
                }

                @Override
                public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                    // <4>
                    // <3>
                }

                @Override
                public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                    // <5>
                    // <4>
                }
            };

            BulkProcessor bulkProcessor = new BulkProcessor.Builder(client::bulkAsync, listener, threadPool)
                    .build(); // <6>
            BulkProcessor bulkProcessor = BulkProcessor.builder(client::bulkAsync, listener).build(); // <5>
            // end::bulk-processor-init
            assertNotNull(bulkProcessor);

@@ -917,7 +908,6 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
            // tag::bulk-processor-close
            bulkProcessor.close();
            // end::bulk-processor-close
            terminate(threadPool);
        }
        {
            // tag::bulk-processor-listener

@@ -944,19 +934,14 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
            };
            // end::bulk-processor-listener

            ThreadPool threadPool = new ThreadPool(settings);
            try {
                // tag::bulk-processor-options
                BulkProcessor.Builder builder = new BulkProcessor.Builder(client::bulkAsync, listener, threadPool);
                builder.setBulkActions(500); // <1>
                builder.setBulkSize(new ByteSizeValue(1L, ByteSizeUnit.MB)); // <2>
                builder.setConcurrentRequests(0); // <3>
                builder.setFlushInterval(TimeValue.timeValueSeconds(10L)); // <4>
                builder.setBackoffPolicy(BackoffPolicy.constantBackoff(TimeValue.timeValueSeconds(1L), 3)); // <5>
                // end::bulk-processor-options
            } finally {
                terminate(threadPool);
            }
            // tag::bulk-processor-options
            BulkProcessor.Builder builder = BulkProcessor.builder(client::bulkAsync, listener);
            builder.setBulkActions(500); // <1>
            builder.setBulkSize(new ByteSizeValue(1L, ByteSizeUnit.MB)); // <2>
            builder.setConcurrentRequests(0); // <3>
            builder.setFlushInterval(TimeValue.timeValueSeconds(10L)); // <4>
            builder.setBackoffPolicy(BackoffPolicy.constantBackoff(TimeValue.timeValueSeconds(1L), 3)); // <5>
            // end::bulk-processor-options
        }
    }
}
@@ -0,0 +1,116 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.documentation;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

/**
 * This class is used to generate the Java Indices API documentation.
 * You need to wrap your code between two tags like:
 * // tag::example[]
 * // end::example[]
 *
 * Where example is your tag name.
 *
 * Then in the documentation, you can extract what is between tag and end tags with
 * ["source","java",subs="attributes,callouts,macros"]
 * --------------------------------------------------
 * include-tagged::{doc-tests}/CRUDDocumentationIT.java[example]
 * --------------------------------------------------
 */
public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase {

    public void testDeleteIndex() throws IOException {
        RestHighLevelClient client = highLevelClient();

        {
            Response createIndexResponse = client().performRequest("PUT", "/posts");
            assertEquals(200, createIndexResponse.getStatusLine().getStatusCode());
        }

        {
            // tag::delete-index-request
            DeleteIndexRequest request = new DeleteIndexRequest("posts"); // <1>
            // end::delete-index-request

            // tag::delete-index-execute
            DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request);
            // end::delete-index-execute
            assertTrue(deleteIndexResponse.isAcknowledged());

            // tag::delete-index-response
            boolean acknowledged = deleteIndexResponse.isAcknowledged(); // <1>
            // end::delete-index-response

            // tag::delete-index-execute-async
            client.indices().deleteIndexAsync(request, new ActionListener<DeleteIndexResponse>() {
                @Override
                public void onResponse(DeleteIndexResponse deleteIndexResponse) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            });
            // end::delete-index-execute-async
        }

        {
            DeleteIndexRequest request = new DeleteIndexRequest("posts");
            // tag::delete-index-request-timeout
            request.timeout(TimeValue.timeValueMinutes(2)); // <1>
            request.timeout("2m"); // <2>
            // end::delete-index-request-timeout
            // tag::delete-index-request-masterTimeout
            request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
            request.timeout("1m"); // <2>
            // end::delete-index-request-masterTimeout
            // tag::delete-index-request-indicesOptions
            request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
            // end::delete-index-request-indicesOptions
        }

        {
            // tag::delete-index-notfound
            try {
                DeleteIndexRequest request = new DeleteIndexRequest("does_not_exist");
                DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request);
            } catch (ElasticsearchException exception) {
                if (exception.status() == RestStatus.NOT_FOUND) {
                    // <1>
                }
            }
            // end::delete-index-notfound
        }
    }
}
@@ -50,6 +50,7 @@ import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;

@@ -91,8 +92,9 @@ public class RestClient implements Closeable {
    private static final Log logger = LogFactory.getLog(RestClient.class);

    private final CloseableHttpAsyncClient client;
    //we don't rely on default headers supported by HttpAsyncClient as those cannot be replaced
    private final Header[] defaultHeaders;
    // We don't rely on default headers supported by HttpAsyncClient as those cannot be replaced.
    // These are package private for tests.
    final List<Header> defaultHeaders;
    private final long maxRetryTimeoutMillis;
    private final String pathPrefix;
    private final AtomicInteger lastHostIndex = new AtomicInteger(0);

@@ -104,7 +106,7 @@ public class RestClient implements Closeable {
            HttpHost[] hosts, String pathPrefix, FailureListener failureListener) {
        this.client = client;
        this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
        this.defaultHeaders = defaultHeaders;
        this.defaultHeaders = Collections.unmodifiableList(Arrays.asList(defaultHeaders));
        this.failureListener = failureListener;
        this.pathPrefix = pathPrefix;
        setHosts(hosts);
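For reference, the default headers stored in the list above are supplied through the low level client builder; a minimal sketch, not part of the commit, with placeholder host and header values:

import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.RestClient;

public class DefaultHeadersSketch {
    public static void main(String[] args) throws Exception {
        // Headers configured here are sent with every request made by this client.
        Header[] defaultHeaders = new Header[]{new BasicHeader("X-Opaque-Id", "my-app")};
        try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
                .setDefaultHeaders(defaultHeaders)
                .build()) {
            restClient.performRequest("GET", "/");
        }
    }
}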
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.cluster.node.stats;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;

@@ -36,6 +37,7 @@ import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.jvm.JvmStats;
import org.elasticsearch.monitor.os.OsStats;
import org.elasticsearch.monitor.process.ProcessStats;
import org.elasticsearch.node.AdaptiveSelectionStats;
import org.elasticsearch.script.ScriptStats;
import org.elasticsearch.threadpool.ThreadPoolStats;
import org.elasticsearch.transport.TransportStats;

@@ -86,6 +88,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment {
    @Nullable
    private IngestStats ingestStats;

    @Nullable
    private AdaptiveSelectionStats adaptiveSelectionStats;

    NodeStats() {
    }

@@ -95,7 +100,8 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment {
                     @Nullable AllCircuitBreakerStats breaker,
                     @Nullable ScriptStats scriptStats,
                     @Nullable DiscoveryStats discoveryStats,
                     @Nullable IngestStats ingestStats) {
                     @Nullable IngestStats ingestStats,
                     @Nullable AdaptiveSelectionStats adaptiveSelectionStats) {
        super(node);
        this.timestamp = timestamp;
        this.indices = indices;

@@ -110,6 +116,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment {
        this.scriptStats = scriptStats;
        this.discoveryStats = discoveryStats;
        this.ingestStats = ingestStats;
        this.adaptiveSelectionStats = adaptiveSelectionStats;
    }

    public long getTimestamp() {

@@ -199,6 +206,11 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment {
        return ingestStats;
    }

    @Nullable
    public AdaptiveSelectionStats getAdaptiveSelectionStats() {
        return adaptiveSelectionStats;
    }

    public static NodeStats readNodeStats(StreamInput in) throws IOException {
        NodeStats nodeInfo = new NodeStats();
        nodeInfo.readFrom(in);

@@ -223,6 +235,11 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment {
        scriptStats = in.readOptionalWriteable(ScriptStats::new);
        discoveryStats = in.readOptionalWriteable(DiscoveryStats::new);
        ingestStats = in.readOptionalWriteable(IngestStats::new);
        if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
            adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new);
        } else {
            adaptiveSelectionStats = null;
        }
    }

    @Override

@@ -246,6 +263,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment {
        out.writeOptionalWriteable(scriptStats);
        out.writeOptionalWriteable(discoveryStats);
        out.writeOptionalWriteable(ingestStats);
        if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
            out.writeOptionalWriteable(adaptiveSelectionStats);
        }
    }

    @Override

@@ -306,6 +326,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment {
        if (getIngestStats() != null) {
            getIngestStats().toXContent(builder, params);
        }
        if (getAdaptiveSelectionStats() != null) {
            getAdaptiveSelectionStats().toXContent(builder, params);
        }
        return builder;
    }
}
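The two onOrAfter checks above follow the usual wire compatibility pattern: the optional section is only written to, and read from, streams whose version knows about it, so mixed clusters keep working during a rolling upgrade. A standalone sketch of the same idea, not part of the commit; the field and stream versions are illustrative only:

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

public class VersionGatedFieldSketch {
    public static void main(String[] args) throws Exception {
        BytesStreamOutput out = new BytesStreamOutput();
        out.setVersion(Version.V_5_6_0);          // pretend the other node is older than 6.1.0
        boolean adaptiveSelection = true;         // stand-in for the optional stats section

        // Mirror of the pattern above: only streams at or after 6.1.0 carry the field.
        if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
            out.writeBoolean(adaptiveSelection);
        }

        StreamInput in = out.bytes().streamInput();
        in.setVersion(Version.V_5_6_0);
        Boolean received = in.getVersion().onOrAfter(Version.V_6_1_0) ? in.readBoolean() : null;
        System.out.println(received);             // null: the 5.6 stream never contained the field
    }
}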
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.cluster.node.stats;

import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.common.io.stream.StreamInput;

@@ -43,6 +44,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
    private boolean script;
    private boolean discovery;
    private boolean ingest;
    private boolean adaptiveSelection;

    public NodesStatsRequest() {
    }

@@ -71,6 +73,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        this.script = true;
        this.discovery = true;
        this.ingest = true;
        this.adaptiveSelection = true;
        return this;
    }

@@ -90,6 +93,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        this.script = false;
        this.discovery = false;
        this.ingest = false;
        this.adaptiveSelection = false;
        return this;
    }

@@ -265,6 +269,18 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        return this;
    }

    public boolean adaptiveSelection() {
        return adaptiveSelection;
    }

    /**
     * Should adaptiveSelection statistics be returned.
     */
    public NodesStatsRequest adaptiveSelection(boolean adaptiveSelection) {
        this.adaptiveSelection = adaptiveSelection;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);

@@ -280,6 +296,11 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        script = in.readBoolean();
        discovery = in.readBoolean();
        ingest = in.readBoolean();
        if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
            adaptiveSelection = in.readBoolean();
        } else {
            adaptiveSelection = false;
        }
    }

    @Override

@@ -297,5 +318,8 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        out.writeBoolean(script);
        out.writeBoolean(discovery);
        out.writeBoolean(ingest);
        if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
            out.writeBoolean(adaptiveSelection);
        }
    }
}
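A small sketch of how a caller opts in to the new flag (not part of the commit; the node id is a placeholder, and clear() is the existing helper shown above that switches every section off):

import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;

public class AdaptiveSelectionStatsRequestSketch {
    public static void main(String[] args) {
        NodesStatsRequest request = new NodesStatsRequest("node-1"); // placeholder node id
        request.clear();                   // switch every stats section off
        request.adaptiveSelection(true);   // then ask only for adaptive selection stats
        System.out.println(request.adaptiveSelection()); // true
    }
}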
@@ -73,7 +73,7 @@ public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRe
        NodesStatsRequest request = nodeStatsRequest.request;
        return nodeService.stats(request.indices(), request.os(), request.process(), request.jvm(), request.threadPool(),
                request.fs(), request.transport(), request.http(), request.breaker(), request.script(), request.discovery(),
                request.ingest());
                request.ingest(), request.adaptiveSelection());
    }

    public static class NodeStatsRequest extends BaseNodeRequest {
core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java (0 changes, Normal file → Executable file)
@@ -58,35 +58,40 @@ final class SettingsUpdater {
        persistentSettings.put(currentState.metaData().persistentSettings());
        changed |= clusterSettings.updateDynamicSettings(persistentToApply, persistentSettings, persistentUpdates, "persistent");

        if (!changed) {
            return currentState;
        final ClusterState clusterState;
        if (changed) {
            MetaData.Builder metaData = MetaData.builder(currentState.metaData())
                .persistentSettings(persistentSettings.build())
                .transientSettings(transientSettings.build());

            ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
            boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings())
                || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings());
            if (updatedReadOnly) {
                blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
            } else {
                blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
            }
            boolean updatedReadOnlyAllowDelete = MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(metaData.persistentSettings())
                || MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(metaData.transientSettings());
            if (updatedReadOnlyAllowDelete) {
                blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
            } else {
                blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
            }
            clusterState = builder(currentState).metaData(metaData).blocks(blocks).build();
        } else {
            clusterState = currentState;
        }

        MetaData.Builder metaData = MetaData.builder(currentState.metaData())
            .persistentSettings(persistentSettings.build())
            .transientSettings(transientSettings.build());

        ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
        boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings())
            || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings());
        if (updatedReadOnly) {
            blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
        } else {
            blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
        }
        boolean updatedReadOnlyAllowDelete = MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(metaData.persistentSettings())
            || MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(metaData.transientSettings());
        if (updatedReadOnlyAllowDelete) {
            blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
        } else {
            blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
        }
        ClusterState build = builder(currentState).metaData(metaData).blocks(blocks).build();
        Settings settings = build.metaData().settings();
        // now we try to apply things and if they are invalid we fail
        // this dryRun will validate & parse settings but won't actually apply them.
        /*
         * Now we try to apply things and if they are invalid we fail. This dry run will validate, parse settings, and trigger deprecation
         * logging, but will not actually apply them.
         */
        final Settings settings = clusterState.metaData().settings();
        clusterSettings.validateUpdate(settings);
        return build;

        return clusterState;
    }
@@ -92,7 +92,8 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
    @Override
    protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
        NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, true, false, false);
        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, true, true, true, false, true, false, false, false, false, false, false);
        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE,
                true, true, true, false, true, false, false, false, false, false, false, false);
        List<ShardStats> shardsStats = new ArrayList<>();
        for (IndexService indexService : indicesService) {
            for (IndexShard indexShard : indexService) {
@@ -176,7 +176,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
            if (action == null) {
                action = (AliasActions) o;
            } else {
                throw new IllegalArgumentException("Too many operations declared in on opeation entry");
                throw new IllegalArgumentException("Too many operations declared on operation entry");
            }
        }
    }
@@ -21,16 +21,39 @@ package org.elasticsearch.action.admin.indices.create;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

/**
 * A response for a create index action.
 */
public class CreateIndexResponse extends AcknowledgedResponse {
public class CreateIndexResponse extends AcknowledgedResponse implements ToXContentObject {

    private static final String SHARDS_ACKNOWLEDGED = "shards_acknowledged";
    private static final String INDEX = "index";

    private static final ParseField SHARDS_ACKNOWLEDGED_PARSER = new ParseField(SHARDS_ACKNOWLEDGED);
    private static final ParseField INDEX_PARSER = new ParseField(INDEX);

    private static final ConstructingObjectParser<CreateIndexResponse, Void> PARSER = new ConstructingObjectParser<>("create_index",
        true, args -> new CreateIndexResponse((boolean) args[0], (boolean) args[1], (String) args[2]));

    static {
        declareAcknowledgedField(PARSER);
        PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED_PARSER,
            ObjectParser.ValueType.BOOLEAN);
        PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), INDEX_PARSER, ObjectParser.ValueType.STRING);
    }

    private boolean shardsAcked;
    private String index;

@@ -79,7 +102,20 @@ public class CreateIndexResponse extends AcknowledgedResponse {
    }

    public void addCustomFields(XContentBuilder builder) throws IOException {
        builder.field("shards_acknowledged", isShardsAcked());
        builder.field("index", index());
        builder.field(SHARDS_ACKNOWLEDGED, isShardsAcked());
        builder.field(INDEX, index());
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        addAcknowledgedField(builder);
        addCustomFields(builder);
        builder.endObject();
        return builder;
    }

    public static CreateIndexResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }
}
core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java (25 changes, Normal file → Executable file)
@@ -22,13 +22,24 @@ package org.elasticsearch.action.admin.indices.delete;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

/**
 * A response for a delete index action.
 */
public class DeleteIndexResponse extends AcknowledgedResponse {
public class DeleteIndexResponse extends AcknowledgedResponse implements ToXContentObject {

    private static final ConstructingObjectParser<DeleteIndexResponse, Void> PARSER = new ConstructingObjectParser<>("delete_index",
        true, args -> new DeleteIndexResponse((boolean) args[0]));

    static {
        declareAcknowledgedField(PARSER);
    }

    DeleteIndexResponse() {
    }

@@ -48,4 +59,16 @@ public class DeleteIndexResponse extends AcknowledgedResponse {
        super.writeTo(out);
        writeAcknowledged(out);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        addAcknowledgedField(builder);
        builder.endObject();
        return builder;
    }

    public static DeleteIndexResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }
}
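A minimal sketch of round tripping the REST body through the new parser (not part of the commit; it assumes the 6.x two argument createParser(NamedXContentRegistry, String) overload of XContent):

import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

public class DeleteIndexResponseParsingSketch {
    public static void main(String[] args) throws Exception {
        String json = "{\"acknowledged\":true}";   // what the delete index REST endpoint returns
        try (XContentParser parser = XContentType.JSON.xContent()
                .createParser(NamedXContentRegistry.EMPTY, json)) {  // assumed overload, see lead-in
            DeleteIndexResponse response = DeleteIndexResponse.fromXContent(parser);
            System.out.println(response.isAcknowledged());           // true
        }
    }
}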
@@ -104,7 +104,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
            validationException = addValidationError("name is missing", validationException);
        }
        if (indexPatterns == null || indexPatterns.size() == 0) {
            validationException = addValidationError("pattern is missing", validationException);
            validationException = addValidationError("index patterns are missing", validationException);
        }
        return validationException;
    }
@ -26,14 +26,17 @@ import org.elasticsearch.action.index.IndexRequest;
|
|||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.threadpool.Scheduler;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.ScheduledThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.function.BiConsumer;
|
||||
|
@ -78,22 +81,20 @@ public class BulkProcessor implements Closeable {
|
|||
|
||||
private final BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer;
|
||||
private final Listener listener;
|
||||
private final ThreadPool threadPool;
|
||||
|
||||
private final Scheduler scheduler;
|
||||
private final Runnable onClose;
|
||||
private int concurrentRequests = 1;
|
||||
private int bulkActions = 1000;
|
||||
private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
|
||||
private TimeValue flushInterval = null;
|
||||
private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff();
|
||||
|
||||
/**
|
||||
* Creates a builder of bulk processor with the client to use and the listener that will be used
|
||||
* to be notified on the completion of bulk requests.
|
||||
*/
|
||||
public Builder(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, Listener listener, ThreadPool threadPool) {
|
||||
private Builder(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, Listener listener,
|
||||
Scheduler scheduler, Runnable onClose) {
|
||||
this.consumer = consumer;
|
||||
this.listener = listener;
|
||||
this.threadPool = threadPool;
|
||||
this.scheduler = scheduler;
|
||||
this.onClose = onClose;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -155,39 +156,51 @@ public class BulkProcessor implements Closeable {
|
|||
* Builds a new bulk processor.
|
||||
*/
|
||||
public BulkProcessor build() {
|
||||
return new BulkProcessor(consumer, backoffPolicy, listener, concurrentRequests, bulkActions, bulkSize, flushInterval, threadPool);
|
||||
return new BulkProcessor(consumer, backoffPolicy, listener, concurrentRequests, bulkActions, bulkSize, flushInterval,
|
||||
scheduler, onClose);
|
||||
}
|
||||
}
|
||||
|
||||
public static Builder builder(Client client, Listener listener) {
|
||||
Objects.requireNonNull(client, "client");
|
||||
Objects.requireNonNull(listener, "listener");
|
||||
return new Builder(client::bulk, listener, client.threadPool(), () -> {});
|
||||
}
|
||||
|
||||
return new Builder(client::bulk, listener, client.threadPool());
|
||||
public static Builder builder(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, Listener listener) {
|
||||
Objects.requireNonNull(consumer, "consumer");
|
||||
Objects.requireNonNull(listener, "listener");
|
||||
final ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = Scheduler.initScheduler(Settings.EMPTY);
|
||||
return new Builder(consumer, listener,
|
||||
(delay, executor, command) -> scheduledThreadPoolExecutor.schedule(command, delay.millis(), TimeUnit.MILLISECONDS),
|
||||
() -> Scheduler.terminate(scheduledThreadPoolExecutor, 10, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
private final int bulkActions;
|
||||
private final long bulkSize;
|
||||
|
||||
private final ThreadPool.Cancellable cancellableFlushTask;
|
||||
private final Scheduler.Cancellable cancellableFlushTask;
|
||||
|
||||
private final AtomicLong executionIdGen = new AtomicLong();
|
||||
|
||||
private BulkRequest bulkRequest;
|
||||
private final BulkRequestHandler bulkRequestHandler;
|
||||
private final Scheduler scheduler;
|
||||
private final Runnable onClose;
|
||||
|
||||
private volatile boolean closed = false;
|
||||
|
||||
BulkProcessor(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BackoffPolicy backoffPolicy, Listener listener,
|
||||
int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval,
|
||||
ThreadPool threadPool) {
|
||||
Scheduler scheduler, Runnable onClose) {
|
||||
this.bulkActions = bulkActions;
|
||||
this.bulkSize = bulkSize.getBytes();
|
||||
this.bulkRequest = new BulkRequest();
|
||||
this.bulkRequestHandler = new BulkRequestHandler(consumer, backoffPolicy, listener, threadPool, concurrentRequests);
|
||||
|
||||
this.scheduler = scheduler;
|
||||
this.bulkRequestHandler = new BulkRequestHandler(consumer, backoffPolicy, listener, scheduler, concurrentRequests);
|
||||
// Start the periodic flushing task after everything else is set up
|
||||
this.cancellableFlushTask = startFlushTask(flushInterval, threadPool);
|
||||
this.cancellableFlushTask = startFlushTask(flushInterval, scheduler);
|
||||
this.onClose = onClose;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -200,6 +213,7 @@ public class BulkProcessor implements Closeable {
|
|||
} catch (InterruptedException exc) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
onClose.run();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -289,9 +303,9 @@ public class BulkProcessor implements Closeable {
|
|||
return this;
|
||||
}
|
||||
|
||||
private ThreadPool.Cancellable startFlushTask(TimeValue flushInterval, ThreadPool threadPool) {
|
||||
private Scheduler.Cancellable startFlushTask(TimeValue flushInterval, Scheduler scheduler) {
|
||||
if (flushInterval == null) {
|
||||
return new ThreadPool.Cancellable() {
|
||||
return new Scheduler.Cancellable() {
|
||||
@Override
|
||||
public void cancel() {}
|
||||
|
||||
|
@ -301,9 +315,8 @@ public class BulkProcessor implements Closeable {
|
|||
}
|
||||
};
|
||||
}
|
||||
|
||||
final Runnable flushRunnable = threadPool.getThreadContext().preserveContext(new Flush());
|
||||
return threadPool.scheduleWithFixedDelay(flushRunnable, flushInterval, ThreadPool.Names.GENERIC);
|
||||
final Runnable flushRunnable = scheduler.preserveContext(new Flush());
|
||||
return scheduler.scheduleWithFixedDelay(flushRunnable, flushInterval, ThreadPool.Names.GENERIC);
|
||||
}
|
||||
|
||||
private void executeIfNeeded() {
|
||||
|
|
|
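The Builder above now carries a Scheduler and an onClose hook instead of a ThreadPool, and the consumer-based builder(...) factory creates, and later terminates, its own ScheduledThreadPoolExecutor. A hedged usage sketch of that new entry point follows; the listener body, the request values, the `client` variable, and the Builder setter names are illustrative and are not shown in these hunks.

// Hedged usage sketch of the consumer-based builder; only the types and the builder/close
// behaviour come from the hunks above, everything else is illustrative.
BulkProcessor.Listener listener = new BulkProcessor.Listener() {
    @Override
    public void beforeBulk(long executionId, BulkRequest request) {}

    @Override
    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {}

    @Override
    public void afterBulk(long executionId, BulkRequest request, Throwable failure) {}
};

// client::bulk is the BiConsumer<BulkRequest, ActionListener<BulkResponse>> the builder expects
BulkProcessor processor = BulkProcessor.builder(client::bulk, listener)
        .setBulkActions(500)                                // flush after 500 actions
        .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB)) // or after 5 MB of requests
        .setFlushInterval(TimeValue.timeValueSeconds(10))   // or every 10 seconds
        .build();

processor.add(new IndexRequest("index", "type", "1").source("{\"field\":\"value\"}", XContentType.JSON));
processor.close(); // eventually runs the onClose hook, terminating the internally created scheduler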
@ -429,6 +429,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
|
|||
if (upsertRequest != null) {
|
||||
upsertRequest.version(version);
|
||||
upsertRequest.versionType(versionType);
|
||||
upsertRequest.setPipeline(defaultPipeline);
|
||||
}
|
||||
IndexRequest doc = updateRequest.doc();
|
||||
if (doc != null) {
|
||||
|
|
|
@ -25,7 +25,7 @@ import org.elasticsearch.action.ActionListener;
|
|||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.threadpool.Scheduler;
|
||||
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.Semaphore;
|
||||
|
@ -44,14 +44,13 @@ public final class BulkRequestHandler {
|
|||
private final int concurrentRequests;
|
||||
|
||||
BulkRequestHandler(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BackoffPolicy backoffPolicy,
|
||||
BulkProcessor.Listener listener, ThreadPool threadPool,
|
||||
int concurrentRequests) {
|
||||
BulkProcessor.Listener listener, Scheduler scheduler, int concurrentRequests) {
|
||||
assert concurrentRequests >= 0;
|
||||
this.logger = Loggers.getLogger(getClass());
|
||||
this.consumer = consumer;
|
||||
this.listener = listener;
|
||||
this.concurrentRequests = concurrentRequests;
|
||||
this.retry = new Retry(EsRejectedExecutionException.class, backoffPolicy, threadPool);
|
||||
this.retry = new Retry(EsRejectedExecutionException.class, backoffPolicy, scheduler);
|
||||
this.semaphore = new Semaphore(concurrentRequests > 0 ? concurrentRequests : 1);
|
||||
}
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.elasticsearch.common.logging.Loggers;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.FutureUtils;
|
||||
import org.elasticsearch.threadpool.Scheduler;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
@ -41,13 +42,12 @@ import java.util.function.Predicate;
|
|||
public class Retry {
|
||||
private final Class<? extends Throwable> retryOnThrowable;
|
||||
private final BackoffPolicy backoffPolicy;
|
||||
private final ThreadPool threadPool;
|
||||
private final Scheduler scheduler;
|
||||
|
||||
|
||||
public Retry(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, ThreadPool threadPool) {
|
||||
public Retry(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Scheduler scheduler) {
|
||||
this.retryOnThrowable = retryOnThrowable;
|
||||
this.backoffPolicy = backoffPolicy;
|
||||
this.threadPool = threadPool;
|
||||
this.scheduler = scheduler;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -58,8 +58,9 @@ public class Retry {
|
|||
* @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not
|
||||
* @param settings settings
|
||||
*/
|
||||
public void withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest, ActionListener<BulkResponse> listener, Settings settings) {
|
||||
RetryHandler r = new RetryHandler(retryOnThrowable, backoffPolicy, consumer, listener, settings, threadPool);
|
||||
public void withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest,
|
||||
ActionListener<BulkResponse> listener, Settings settings) {
|
||||
RetryHandler r = new RetryHandler(retryOnThrowable, backoffPolicy, consumer, listener, settings, scheduler);
|
||||
r.execute(bulkRequest);
|
||||
}
|
||||
|
||||
|
@ -72,7 +73,8 @@ public class Retry {
|
|||
* @param settings settings
|
||||
* @return a future representing the bulk response returned by the client.
|
||||
*/
|
||||
public PlainActionFuture<BulkResponse> withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest, Settings settings) {
|
||||
public PlainActionFuture<BulkResponse> withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer,
|
||||
BulkRequest bulkRequest, Settings settings) {
|
||||
PlainActionFuture<BulkResponse> future = PlainActionFuture.newFuture();
|
||||
withBackoff(consumer, bulkRequest, future, settings);
|
||||
return future;
|
||||
|
@ -80,7 +82,7 @@ public class Retry {
|
|||
|
||||
static class RetryHandler implements ActionListener<BulkResponse> {
|
||||
private final Logger logger;
|
||||
private final ThreadPool threadPool;
|
||||
private final Scheduler scheduler;
|
||||
private final BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer;
|
||||
private final ActionListener<BulkResponse> listener;
|
||||
private final Iterator<TimeValue> backoff;
|
||||
|
@ -95,13 +97,13 @@ public class Retry {
|
|||
|
||||
RetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy,
|
||||
BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, ActionListener<BulkResponse> listener,
|
||||
Settings settings, ThreadPool threadPool) {
|
||||
Settings settings, Scheduler scheduler) {
|
||||
this.retryOnThrowable = retryOnThrowable;
|
||||
this.backoff = backoffPolicy.iterator();
|
||||
this.consumer = consumer;
|
||||
this.listener = listener;
|
||||
this.logger = Loggers.getLogger(getClass(), settings);
|
||||
this.threadPool = threadPool;
|
||||
this.scheduler = scheduler;
|
||||
// in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood
|
||||
this.startTimestampNanos = System.nanoTime();
|
||||
}
|
||||
|
@ -136,8 +138,8 @@ public class Retry {
|
|||
assert backoff.hasNext();
|
||||
TimeValue next = backoff.next();
|
||||
logger.trace("Retry of bulk request scheduled in {} ms.", next.millis());
|
||||
Runnable command = threadPool.getThreadContext().preserveContext(() -> this.execute(bulkRequestForRetry));
|
||||
scheduledRequestFuture = threadPool.schedule(next, ThreadPool.Names.SAME, command);
|
||||
Runnable command = scheduler.preserveContext(() -> this.execute(bulkRequestForRetry));
|
||||
scheduledRequestFuture = scheduler.schedule(next, ThreadPool.Names.SAME, command);
|
||||
}
|
||||
|
||||
private BulkRequest createBulkRequestForRetry(BulkResponse bulkItemResponses) {
|
||||
|
|
|
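Retry is now driven by a Scheduler rather than a ThreadPool. A minimal hedged sketch of the blocking withBackoff variant, assuming `client`, `scheduler`, and `bulkRequest` already exist in the caller's scope:

// Hedged sketch of the updated Retry API (Scheduler instead of ThreadPool); the constructor and
// withBackoff signatures come from the hunk above, the surrounding variables are assumed.
Retry retry = new Retry(EsRejectedExecutionException.class, BackoffPolicy.exponentialBackoff(), scheduler);
PlainActionFuture<BulkResponse> future = retry.withBackoff(client::bulk, bulkRequest, Settings.EMPTY);
BulkResponse response = future.actionGet(); // blocks until the bulk request succeeds or retries are exhausted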
@ -77,7 +77,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
|
|||
ActionListener<SearchResponse> listener, GroupShardsIterator<SearchShardIterator> shardsIts,
|
||||
TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion,
|
||||
SearchTask task, SearchPhaseResults<Result> resultConsumer, int maxConcurrentShardRequests) {
|
||||
super(name, request, shardsIts, logger, maxConcurrentShardRequests);
|
||||
super(name, request, shardsIts, logger, maxConcurrentShardRequests, executor);
|
||||
this.timeProvider = timeProvider;
|
||||
this.logger = logger;
|
||||
this.searchTransportService = searchTransportService;
|
||||
|
|
|
@ -88,10 +88,9 @@ final class ExpandSearchPhase extends SearchPhase {
|
|||
}
|
||||
for (InnerHitBuilder innerHitBuilder : innerHitBuilders) {
|
||||
SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(innerHitBuilder)
|
||||
.query(groupQuery);
|
||||
SearchRequest groupRequest = new SearchRequest(searchRequest.indices())
|
||||
.types(searchRequest.types())
|
||||
.source(sourceBuilder);
|
||||
.query(groupQuery)
|
||||
.postFilter(searchRequest.source().postFilter());
|
||||
SearchRequest groupRequest = buildExpandSearchRequest(searchRequest, sourceBuilder);
|
||||
multiRequest.add(groupRequest);
|
||||
}
|
||||
}
|
||||
|
@ -120,6 +119,21 @@ final class ExpandSearchPhase extends SearchPhase {
|
|||
}
|
||||
}
|
||||
|
||||
private SearchRequest buildExpandSearchRequest(SearchRequest orig, SearchSourceBuilder sourceBuilder) {
|
||||
SearchRequest groupRequest = new SearchRequest(orig.indices())
|
||||
.types(orig.types())
|
||||
.source(sourceBuilder)
|
||||
.indicesOptions(orig.indicesOptions())
|
||||
.requestCache(orig.requestCache())
|
||||
.preference(orig.preference())
|
||||
.routing(orig.routing())
|
||||
.searchType(orig.searchType());
|
||||
if (orig.isMaxConcurrentShardRequestsSet()) {
|
||||
groupRequest.setMaxConcurrentShardRequests(orig.getMaxConcurrentShardRequests());
|
||||
}
|
||||
return groupRequest;
|
||||
}
|
||||
|
||||
private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options) {
|
||||
SearchSourceBuilder groupSource = new SearchSourceBuilder();
|
||||
groupSource.from(options.getFrom());
|
||||
|
|
|
@ -26,12 +26,15 @@ import org.elasticsearch.action.support.TransportActions;
|
|||
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.transport.ConnectTransportException;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.Executor;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
|
@ -45,18 +48,30 @@ import java.util.stream.Stream;
|
|||
*/
|
||||
abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends SearchPhase {
|
||||
private final SearchRequest request;
|
||||
private final GroupShardsIterator<SearchShardIterator> toSkipShardsIts;
|
||||
private final GroupShardsIterator<SearchShardIterator> shardsIts;
|
||||
private final Logger logger;
|
||||
private final int expectedTotalOps;
|
||||
private final AtomicInteger totalOps = new AtomicInteger();
|
||||
private final AtomicInteger shardExecutionIndex = new AtomicInteger(0);
|
||||
private final int maxConcurrentShardRequests;
|
||||
private final Executor executor;
|
||||
|
||||
InitialSearchPhase(String name, SearchRequest request, GroupShardsIterator<SearchShardIterator> shardsIts, Logger logger,
|
||||
int maxConcurrentShardRequests) {
|
||||
int maxConcurrentShardRequests, Executor executor) {
|
||||
super(name);
|
||||
this.request = request;
|
||||
this.shardsIts = shardsIts;
|
||||
final List<SearchShardIterator> toSkipIterators = new ArrayList<>();
|
||||
final List<SearchShardIterator> iterators = new ArrayList<>();
|
||||
for (final SearchShardIterator iterator : shardsIts) {
|
||||
if (iterator.skip()) {
|
||||
toSkipIterators.add(iterator);
|
||||
} else {
|
||||
iterators.add(iterator);
|
||||
}
|
||||
}
|
||||
this.toSkipShardsIts = new GroupShardsIterator<>(toSkipIterators);
|
||||
this.shardsIts = new GroupShardsIterator<>(iterators);
|
||||
this.logger = logger;
|
||||
// we need to add 1 for the non-active partition, since we count it in the total. This means for each shard in the iterator we sum up
|
||||
// its number of active shards but use 1 as the default if no replica of a shard is active at this point.
|
||||
|
@ -64,6 +79,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
|
|||
// we process, hence we add one for the non-active partition here.
|
||||
this.expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
|
||||
this.maxConcurrentShardRequests = Math.min(maxConcurrentShardRequests, shardsIts.size());
|
||||
this.executor = executor;
|
||||
}
|
||||
|
||||
private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId,
|
||||
|
@ -71,19 +87,19 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
|
|||
// we always add the shard failure for a specific shard instance
|
||||
// we do make sure to clean it on a successful response from a shard
|
||||
SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId(), shardIt.getClusterAlias(),
|
||||
shardIt.getOriginalIndices());
|
||||
shardIt.getOriginalIndices());
|
||||
onShardFailure(shardIndex, shardTarget, e);
|
||||
|
||||
if (totalOps.incrementAndGet() == expectedTotalOps) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
if (e != null && !TransportActions.isShardNotAvailableException(e)) {
|
||||
logger.debug(
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"{}: Failed to execute [{}]",
|
||||
shard != null ? shard.shortSummary() :
|
||||
shardIt.shardId(),
|
||||
request),
|
||||
e);
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"{}: Failed to execute [{}]",
|
||||
shard != null ? shard.shortSummary() :
|
||||
shardIt.shardId(),
|
||||
request),
|
||||
e);
|
||||
} else if (logger.isTraceEnabled()) {
|
||||
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
|
||||
}
|
||||
|
@ -94,32 +110,27 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
|
|||
final boolean lastShard = nextShard == null;
|
||||
// trace log this exception
|
||||
logger.trace(
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"{}: Failed to execute [{}] lastShard [{}]",
|
||||
shard != null ? shard.shortSummary() : shardIt.shardId(),
|
||||
request,
|
||||
lastShard),
|
||||
e);
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"{}: Failed to execute [{}] lastShard [{}]",
|
||||
shard != null ? shard.shortSummary() : shardIt.shardId(),
|
||||
request,
|
||||
lastShard),
|
||||
e);
|
||||
if (!lastShard) {
|
||||
try {
|
||||
performPhaseOnShard(shardIndex, shardIt, nextShard);
|
||||
} catch (Exception inner) {
|
||||
inner.addSuppressed(e);
|
||||
onShardFailure(shardIndex, shard, shard.currentNodeId(), shardIt, inner);
|
||||
}
|
||||
performPhaseOnShard(shardIndex, shardIt, nextShard);
|
||||
} else {
|
||||
maybeExecuteNext(); // move to the next execution if needed
|
||||
// no more shards active, add a failure
|
||||
if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
|
||||
if (e != null && !TransportActions.isShardNotAvailableException(e)) {
|
||||
logger.debug(
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"{}: Failed to execute [{}] lastShard [{}]",
|
||||
shard != null ? shard.shortSummary() :
|
||||
shardIt.shardId(),
|
||||
request,
|
||||
lastShard),
|
||||
e);
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"{}: Failed to execute [{}] lastShard [{}]",
|
||||
shard != null ? shard.shortSummary() :
|
||||
shardIt.shardId(),
|
||||
request,
|
||||
lastShard),
|
||||
e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -128,14 +139,18 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
|
|||
|
||||
@Override
|
||||
public final void run() throws IOException {
|
||||
boolean success = shardExecutionIndex.compareAndSet(0, maxConcurrentShardRequests);
|
||||
assert success;
|
||||
for (int i = 0; i < maxConcurrentShardRequests; i++) {
|
||||
SearchShardIterator shardRoutings = shardsIts.get(i);
|
||||
if (shardRoutings.skip()) {
|
||||
skipShard(shardRoutings);
|
||||
} else {
|
||||
performPhaseOnShard(i, shardRoutings, shardRoutings.nextOrNull());
|
||||
for (final SearchShardIterator iterator : toSkipShardsIts) {
|
||||
assert iterator.skip();
|
||||
skipShard(iterator);
|
||||
}
|
||||
if (shardsIts.size() > 0) {
|
||||
int maxConcurrentShardRequests = Math.min(this.maxConcurrentShardRequests, shardsIts.size());
|
||||
final boolean success = shardExecutionIndex.compareAndSet(0, maxConcurrentShardRequests);
|
||||
assert success;
|
||||
for (int index = 0; index < maxConcurrentShardRequests; index++) {
|
||||
final SearchShardIterator shardRoutings = shardsIts.get(index);
|
||||
assert shardRoutings.skip() == false;
|
||||
performPhaseOnShard(index, shardRoutings, shardRoutings.nextOrNull());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -143,38 +158,71 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
|
|||
private void maybeExecuteNext() {
|
||||
final int index = shardExecutionIndex.getAndIncrement();
|
||||
if (index < shardsIts.size()) {
|
||||
SearchShardIterator shardRoutings = shardsIts.get(index);
|
||||
if (shardRoutings.skip()) {
|
||||
skipShard(shardRoutings);
|
||||
} else {
|
||||
performPhaseOnShard(index, shardRoutings, shardRoutings.nextOrNull());
|
||||
}
|
||||
final SearchShardIterator shardRoutings = shardsIts.get(index);
|
||||
performPhaseOnShard(index, shardRoutings, shardRoutings.nextOrNull());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void maybeFork(final Thread thread, final Runnable runnable) {
|
||||
if (thread == Thread.currentThread()) {
|
||||
fork(runnable);
|
||||
} else {
|
||||
runnable.run();
|
||||
}
|
||||
}
|
||||
|
||||
private void fork(final Runnable runnable) {
|
||||
executor.execute(new AbstractRunnable() {
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
runnable.run();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isForceExecution() {
|
||||
// we can not allow a stuffed queue to reject execution here
|
||||
return true;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final ShardRouting shard) {
|
||||
/*
|
||||
* We capture the thread that this phase is starting on. When we are called back after executing the phase, we are either on the
|
||||
* same thread (because we never went async, or the same thread was selected from the thread pool) or a different thread. If we
|
||||
* continue on the same thread in the case that we never went async and this happens repeatedly we will end up recursing deeply and
|
||||
* could stack overflow. To prevent this, we fork if we are called back on the same thread that execution started on and otherwise
|
||||
* we can continue (cf. InitialSearchPhase#maybeFork).
|
||||
*/
|
||||
final Thread thread = Thread.currentThread();
|
||||
if (shard == null) {
|
||||
onShardFailure(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
|
||||
fork(() -> onShardFailure(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())));
|
||||
} else {
|
||||
try {
|
||||
executePhaseOnShard(shardIt, shard, new SearchActionListener<FirstResult>(new SearchShardTarget(shard.currentNodeId(),
|
||||
shardIt.shardId(), shardIt.getClusterAlias(), shardIt.getOriginalIndices()), shardIndex) {
|
||||
@Override
|
||||
public void innerOnResponse(FirstResult result) {
|
||||
onShardResult(result, shardIt);
|
||||
maybeFork(thread, () -> onShardResult(result, shardIt));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception t) {
|
||||
onShardFailure(shardIndex, shard, shard.currentNodeId(), shardIt, t);
|
||||
maybeFork(thread, () -> onShardFailure(shardIndex, shard, shard.currentNodeId(), shardIt, t));
|
||||
}
|
||||
});
|
||||
} catch (ConnectTransportException | IllegalArgumentException ex) {
|
||||
// we are getting the connection early here so we might run into nodes that are not connected. in that case we move on to
|
||||
// the next shard. previously when using discovery nodes here we had a special case for null when a node was not connected
|
||||
// at all, which is not needed anymore.
|
||||
onShardFailure(shardIndex, shard, shard.currentNodeId(), shardIt, ex);
|
||||
} catch (final Exception e) {
|
||||
/*
|
||||
* It is possible to run into connection exceptions here because we are getting the connection early and might run in to
|
||||
* nodes that are not connected. In this case, on shard failure will move us to the next shard copy.
|
||||
*/
|
||||
fork(() -> onShardFailure(shardIndex, shard, shard.currentNodeId(), shardIt, e));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
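The block comment in performPhaseOnShard above explains why callbacks fork when they land back on the thread that started the phase. The same idea in isolation, as a hedged generic sketch; the class and names here are illustrative, not the Elasticsearch code:

import java.util.concurrent.Executor;

// Generic illustration of the "fork only if we are called back on the thread we started on"
// pattern described above; a sketch, not the Elasticsearch implementation.
final class ForkOnSameThread {
    private final Executor executor;

    ForkOnSameThread(Executor executor) {
        this.executor = executor;
    }

    /** Capture the starting thread before kicking off an async step, then wrap its callback. */
    Runnable wrapCallback(Runnable continuation) {
        final Thread startingThread = Thread.currentThread();
        return () -> {
            if (Thread.currentThread() == startingThread) {
                // the async step completed inline: fork to keep the stack shallow
                executor.execute(continuation);
            } else {
                // already on a different (pool) thread: continue directly
                continuation.run();
            }
        };
    }
}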
@ -204,7 +252,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
|
|||
} else if (xTotalOps > expectedTotalOps) {
|
||||
throw new AssertionError("unexpected higher total ops [" + xTotalOps + "] compared to expected ["
|
||||
+ expectedTotalOps + "]");
|
||||
} else {
|
||||
} else if (shardsIt.skip() == false) {
|
||||
maybeExecuteNext();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -60,6 +60,7 @@ import org.elasticsearch.transport.TransportService;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.io.UncheckedIOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.function.BiFunction;
|
||||
|
@ -94,6 +95,10 @@ public class SearchTransportService extends AbstractComponent {
|
|||
this.responseWrapper = responseWrapper;
|
||||
}
|
||||
|
||||
public Map<String, Long> getClientConnections() {
|
||||
return Collections.unmodifiableMap(clientConnections);
|
||||
}
|
||||
|
||||
public void sendFreeContext(Transport.Connection connection, final long contextId, OriginalIndices originalIndices) {
|
||||
transportService.sendRequest(connection, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(originalIndices, contextId),
|
||||
TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(new ActionListener<SearchFreeContextResponse>() {
|
||||
|
|
|
@ -131,7 +131,8 @@ public class ShardSearchFailure implements ShardOperationFailedException {
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "shard [" + (shardTarget == null ? "_na" : shardTarget) + "], reason [" + reason + "], cause [" + (cause == null ? "_na" : ExceptionsHelper.stackTrace(cause)) + "]";
|
||||
return "shard [" + (shardTarget == null ? "_na" : shardTarget) + "], reason [" + reason + "], cause [" +
|
||||
(cause == null ? "_na" : ExceptionsHelper.stackTrace(cause)) + "]";
|
||||
}
|
||||
|
||||
public static ShardSearchFailure readShardSearchFailure(StreamInput in) throws IOException {
|
||||
|
@ -210,9 +211,12 @@ public class ShardSearchFailure implements ShardOperationFailedException {
|
|||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
return new ShardSearchFailure(exception,
|
||||
new SearchShardTarget(nodeId,
|
||||
new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId), null, OriginalIndices.NONE));
|
||||
SearchShardTarget searchShardTarget = null;
|
||||
if (nodeId != null) {
|
||||
searchShardTarget = new SearchShardTarget(nodeId,
|
||||
new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId), null, OriginalIndices.NONE);
|
||||
}
|
||||
return new ShardSearchFailure(exception, searchShardTarget);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
core/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java
Normal file → Executable file
|
@ -19,17 +19,32 @@
|
|||
package org.elasticsearch.action.support.master;
|
||||
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
|
||||
|
||||
/**
|
||||
* Abstract class that allows marking action responses that support acknowledgements.
|
||||
* Facilitates consistency across different APIs.
|
||||
*/
|
||||
public abstract class AcknowledgedResponse extends ActionResponse {
|
||||
|
||||
private static final String ACKNOWLEDGED = "acknowledged";
|
||||
private static final ParseField ACKNOWLEDGED_PARSER = new ParseField(ACKNOWLEDGED);
|
||||
|
||||
protected static <T extends AcknowledgedResponse> void declareAcknowledgedField(ConstructingObjectParser<T, Void> PARSER) {
|
||||
PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), ACKNOWLEDGED_PARSER,
|
||||
ObjectParser.ValueType.BOOLEAN);
|
||||
}
|
||||
|
||||
private boolean acknowledged;
|
||||
|
||||
protected AcknowledgedResponse() {
|
||||
|
@ -61,4 +76,8 @@ public abstract class AcknowledgedResponse extends ActionResponse {
|
|||
protected void writeAcknowledged(StreamOutput out) throws IOException {
|
||||
out.writeBoolean(acknowledged);
|
||||
}
|
||||
|
||||
protected void addAcknowledgedField(XContentBuilder builder) throws IOException {
|
||||
builder.field(ACKNOWLEDGED, isAcknowledged());
|
||||
}
|
||||
}
|
||||
|
|
|
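A hedged sketch of how a concrete response might use the new declareAcknowledgedField and addAcknowledgedField helpers; the ExampleAcknowledgedResponse class, its boolean constructor, and the matching super-constructor are assumptions for illustration and are not part of this commit:

// Hedged sketch; assumes imports for ConstructingObjectParser, ToXContentObject, XContentBuilder
// and XContentParser, and a boolean constructor on AcknowledgedResponse (not shown in this hunk).
public class ExampleAcknowledgedResponse extends AcknowledgedResponse implements ToXContentObject {

    private static final ConstructingObjectParser<ExampleAcknowledgedResponse, Void> PARSER =
            new ConstructingObjectParser<>("example_acknowledged_response", true,
                    args -> new ExampleAcknowledgedResponse((boolean) args[0]));

    static {
        // registers the "acknowledged" boolean as the first constructor argument
        declareAcknowledgedField(PARSER);
    }

    ExampleAcknowledgedResponse(boolean acknowledged) {
        super(acknowledged); // assumption: the superclass exposes a constructor taking the flag
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        addAcknowledgedField(builder); // writes "acknowledged": true|false
        builder.endObject();
        return builder;
    }

    public static ExampleAcknowledgedResponse fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }
}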
@ -21,6 +21,7 @@ package org.elasticsearch.bootstrap;
|
|||
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.plugins.Platforms;
|
||||
import org.elasticsearch.plugins.PluginInfo;
|
||||
|
@ -73,6 +74,9 @@ final class Spawner implements Closeable {
|
|||
*/
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsFile)) {
|
||||
for (final Path plugin : stream) {
|
||||
if (FileSystemUtils.isDesktopServicesStore(plugin)) {
|
||||
continue;
|
||||
}
|
||||
final PluginInfo info = PluginInfo.readFromProperties(plugin);
|
||||
final Path spawnPath = Platforms.nativeControllerPath(plugin);
|
||||
if (!Files.isRegularFile(spawnPath)) {
|
||||
|
|
|
@ -199,7 +199,6 @@ final class SystemCallFilter {
|
|||
static final int SECCOMP_RET_ALLOW = 0x7FFF0000;
|
||||
|
||||
// some errno constants for error checking/handling
|
||||
static final int EPERM = 0x01;
|
||||
static final int EACCES = 0x0D;
|
||||
static final int EFAULT = 0x0E;
|
||||
static final int EINVAL = 0x16;
|
||||
|
@ -272,27 +271,6 @@ final class SystemCallFilter {
|
|||
"with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
|
||||
}
|
||||
|
||||
// pure paranoia:
|
||||
|
||||
// check that unimplemented syscalls actually return ENOSYS
|
||||
// you never know (e.g. https://code.google.com/p/chromium/issues/detail?id=439795)
|
||||
if (linux_syscall(999) >= 0) {
|
||||
throw new UnsupportedOperationException("seccomp unavailable: your kernel is buggy and you should upgrade");
|
||||
}
|
||||
|
||||
switch (Native.getLastError()) {
|
||||
case ENOSYS:
|
||||
break; // ok
|
||||
case EPERM:
|
||||
// NOT ok, but likely a docker container
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("syscall(BOGUS) bogusly gets EPERM instead of ENOSYS");
|
||||
}
|
||||
break;
|
||||
default:
|
||||
throw new UnsupportedOperationException("seccomp unavailable: your kernel is buggy and you should upgrade");
|
||||
}
|
||||
|
||||
// try to check system calls really are who they claim
|
||||
// you never know (e.g. https://chromium.googlesource.com/chromium/src.git/+/master/sandbox/linux/seccomp-bpf/sandbox_bpf.cc#57)
|
||||
final int bogusArg = 0xf7a46a5c;
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.common.io;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
|
@ -65,6 +66,16 @@ public final class FileSystemUtils {
|
|||
return fileName.toString().startsWith(".");
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether the file denoted by the given path is a desktop services store created by Finder on macOS.
|
||||
*
|
||||
* @param path the path
|
||||
* @return true if the current system is macOS and the specified file appears to be a desktop services store file
|
||||
*/
|
||||
public static boolean isDesktopServicesStore(final Path path) {
|
||||
return Constants.MAC_OS_X && Files.isRegularFile(path) && ".DS_Store".equals(path.getFileName().toString());
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends the path to the given base and strips N elements off the path if strip is > 0.
|
||||
*/
|
||||
|
|
|
@ -23,19 +23,19 @@ package org.elasticsearch.common.util.concurrent;
|
|||
* A class used to wrap a {@code Runnable} that allows capturing the time from task creation
|
||||
* through execution, as well as the execution time alone.
|
||||
*/
|
||||
class TimedRunnable implements Runnable {
|
||||
class TimedRunnable extends AbstractRunnable {
|
||||
private final Runnable original;
|
||||
private final long creationTimeNanos;
|
||||
private long startTimeNanos;
|
||||
private long finishTimeNanos = -1;
|
||||
|
||||
TimedRunnable(Runnable original) {
|
||||
TimedRunnable(final Runnable original) {
|
||||
this.original = original;
|
||||
this.creationTimeNanos = System.nanoTime();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
public void doRun() {
|
||||
try {
|
||||
startTimeNanos = System.nanoTime();
|
||||
original.run();
|
||||
|
@ -44,6 +44,32 @@ class TimedRunnable implements Runnable {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRejection(final Exception e) {
|
||||
if (original instanceof AbstractRunnable) {
|
||||
((AbstractRunnable) original).onRejection(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onAfter() {
|
||||
if (original instanceof AbstractRunnable) {
|
||||
((AbstractRunnable) original).onAfter();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(final Exception e) {
|
||||
if (original instanceof AbstractRunnable) {
|
||||
((AbstractRunnable) original).onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isForceExecution() {
|
||||
return original instanceof AbstractRunnable && ((AbstractRunnable) original).isForceExecution();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the time since this task was created until it finished running.
|
||||
* If the task is still running or has not yet been run, returns -1.
|
||||
|
@ -67,4 +93,5 @@ class TimedRunnable implements Runnable {
|
|||
}
|
||||
return finishTimeNanos - startTimeNanos;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -453,7 +453,7 @@ public final class ConstructingObjectParser<Value, Context> extends AbstractObje
|
|||
* use of ConstructingObjectParser. You should be using ObjectParser instead. Since this is more of a programmer error and the
|
||||
* parser ought to still work we just assert this.
|
||||
*/
|
||||
assert false == constructorArgInfos.isEmpty() : "[" + objectParser.getName() + "] must configure at least on constructor "
|
||||
assert false == constructorArgInfos.isEmpty() : "[" + objectParser.getName() + "] must configure at least one constructor "
|
||||
+ "argument. If it doesn't have any it should use ObjectParser instead of ConstructingObjectParser. This is a bug "
|
||||
+ "in the parser declaration.";
|
||||
// All missing constructor arguments were optional. Just build the target and return it.
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.discovery;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@ -26,33 +27,48 @@ import org.elasticsearch.common.io.stream.Writeable;
|
|||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.discovery.zen.PendingClusterStateStats;
|
||||
import org.elasticsearch.discovery.zen.PublishClusterStateStats;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class DiscoveryStats implements Writeable, ToXContentFragment {
|
||||
|
||||
@Nullable
|
||||
private final PendingClusterStateStats queueStats;
|
||||
private final PublishClusterStateStats publishStats;
|
||||
|
||||
public DiscoveryStats(PendingClusterStateStats queueStats) {
|
||||
public DiscoveryStats(PendingClusterStateStats queueStats, PublishClusterStateStats publishStats) {
|
||||
this.queueStats = queueStats;
|
||||
this.publishStats = publishStats;
|
||||
}
|
||||
|
||||
public DiscoveryStats(StreamInput in) throws IOException {
|
||||
queueStats = in.readOptionalWriteable(PendingClusterStateStats::new);
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
|
||||
publishStats = in.readOptionalWriteable(PublishClusterStateStats::new);
|
||||
} else {
|
||||
publishStats = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeOptionalWriteable(queueStats);
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
|
||||
out.writeOptionalWriteable(publishStats);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(Fields.DISCOVERY);
|
||||
if (queueStats != null ){
|
||||
if (queueStats != null) {
|
||||
queueStats.toXContent(builder, params);
|
||||
}
|
||||
if (publishStats != null) {
|
||||
publishStats.toXContent(builder, params);
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
@ -64,4 +80,8 @@ public class DiscoveryStats implements Writeable, ToXContentFragment {
|
|||
public PendingClusterStateStats getQueueStats() {
|
||||
return queueStats;
|
||||
}
|
||||
|
||||
public PublishClusterStateStats getPublishStats() {
|
||||
return publishStats;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,7 +21,6 @@ package org.elasticsearch.discovery.single;
|
|||
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskListener;
|
||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
|
@ -34,6 +33,7 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.discovery.Discovery;
|
||||
import org.elasticsearch.discovery.DiscoveryStats;
|
||||
import org.elasticsearch.discovery.zen.PendingClusterStateStats;
|
||||
import org.elasticsearch.discovery.zen.PublishClusterStateStats;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -94,7 +94,7 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D
|
|||
|
||||
@Override
|
||||
public DiscoveryStats stats() {
|
||||
return new DiscoveryStats((PendingClusterStateStats) null);
|
||||
return new DiscoveryStats(null, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -65,6 +65,7 @@ import java.util.Set;
|
|||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public class PublishClusterStateAction extends AbstractComponent {
|
||||
|
||||
|
@ -90,6 +91,10 @@ public class PublishClusterStateAction extends AbstractComponent {
|
|||
private final IncomingClusterStateListener incomingClusterStateListener;
|
||||
private final DiscoverySettings discoverySettings;
|
||||
|
||||
private final AtomicLong fullClusterStateReceivedCount = new AtomicLong();
|
||||
private final AtomicLong incompatibleClusterStateDiffReceivedCount = new AtomicLong();
|
||||
private final AtomicLong compatibleClusterStateDiffReceivedCount = new AtomicLong();
|
||||
|
||||
public PublishClusterStateAction(
|
||||
Settings settings,
|
||||
TransportService transportService,
|
||||
|
@ -380,11 +385,13 @@ public class PublishClusterStateAction extends AbstractComponent {
|
|||
// If true we received full cluster state - otherwise diffs
|
||||
if (in.readBoolean()) {
|
||||
incomingState = ClusterState.readFrom(in, transportService.getLocalNode());
|
||||
fullClusterStateReceivedCount.incrementAndGet();
|
||||
logger.debug("received full cluster state version [{}] with size [{}]", incomingState.version(),
|
||||
request.bytes().length());
|
||||
} else if (lastSeenClusterState != null) {
|
||||
Diff<ClusterState> diff = ClusterState.readDiffFrom(in, lastSeenClusterState.nodes().getLocalNode());
|
||||
incomingState = diff.apply(lastSeenClusterState);
|
||||
compatibleClusterStateDiffReceivedCount.incrementAndGet();
|
||||
logger.debug("received diff cluster state version [{}] with uuid [{}], diff size [{}]",
|
||||
incomingState.version(), incomingState.stateUUID(), request.bytes().length());
|
||||
} else {
|
||||
|
@ -394,6 +401,9 @@ public class PublishClusterStateAction extends AbstractComponent {
|
|||
incomingClusterStateListener.onIncomingClusterState(incomingState);
|
||||
lastSeenClusterState = incomingState;
|
||||
}
|
||||
} catch (IncompatibleClusterStateVersionException e) {
|
||||
incompatibleClusterStateDiffReceivedCount.incrementAndGet();
|
||||
throw e;
|
||||
} finally {
|
||||
IOUtils.close(in);
|
||||
}
|
||||
|
@ -636,4 +646,11 @@ public class PublishClusterStateAction extends AbstractComponent {
|
|||
publishingTimedOut.set(isTimedOut);
|
||||
}
|
||||
}
|
||||
|
||||
public PublishClusterStateStats stats() {
|
||||
return new PublishClusterStateStats(
|
||||
fullClusterStateReceivedCount.get(),
|
||||
incompatibleClusterStateDiffReceivedCount.get(),
|
||||
compatibleClusterStateDiffReceivedCount.get());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,90 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.discovery.zen;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Class encapsulating stats about the PublishClusterStateAction
|
||||
*/
|
||||
public class PublishClusterStateStats implements Writeable, ToXContentObject {
|
||||
|
||||
private final long fullClusterStateReceivedCount;
|
||||
private final long incompatibleClusterStateDiffReceivedCount;
|
||||
private final long compatibleClusterStateDiffReceivedCount;
|
||||
|
||||
/**
|
||||
* @param fullClusterStateReceivedCount the number of times this node has received a full copy of the cluster state from the master.
|
||||
* @param incompatibleClusterStateDiffReceivedCount the number of times this node has received a cluster-state diff that it could not apply to its local cluster state.
|
||||
* @param compatibleClusterStateDiffReceivedCount the number of times that received cluster-state diffs were compatible with the last-seen cluster state and were applied.
|
||||
*/
|
||||
public PublishClusterStateStats(long fullClusterStateReceivedCount,
|
||||
long incompatibleClusterStateDiffReceivedCount,
|
||||
long compatibleClusterStateDiffReceivedCount) {
|
||||
this.fullClusterStateReceivedCount = fullClusterStateReceivedCount;
|
||||
this.incompatibleClusterStateDiffReceivedCount = incompatibleClusterStateDiffReceivedCount;
|
||||
this.compatibleClusterStateDiffReceivedCount = compatibleClusterStateDiffReceivedCount;
|
||||
}
|
||||
|
||||
public PublishClusterStateStats(StreamInput in) throws IOException {
|
||||
fullClusterStateReceivedCount = in.readVLong();
|
||||
incompatibleClusterStateDiffReceivedCount = in.readVLong();
|
||||
compatibleClusterStateDiffReceivedCount = in.readVLong();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVLong(fullClusterStateReceivedCount);
|
||||
out.writeVLong(incompatibleClusterStateDiffReceivedCount);
|
||||
out.writeVLong(compatibleClusterStateDiffReceivedCount);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject("published_cluster_states");
|
||||
{
|
||||
builder.field("full_states", fullClusterStateReceivedCount);
|
||||
builder.field("incompatible_diffs", incompatibleClusterStateDiffReceivedCount);
|
||||
builder.field("compatible_diffs", compatibleClusterStateDiffReceivedCount);
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
long getFullClusterStateReceivedCount() { return fullClusterStateReceivedCount; }
|
||||
|
||||
long getIncompatibleClusterStateDiffReceivedCount() { return incompatibleClusterStateDiffReceivedCount; }
|
||||
|
||||
long getCompatibleClusterStateDiffReceivedCount() { return compatibleClusterStateDiffReceivedCount; }
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "PublishClusterStateStats(full=" + fullClusterStateReceivedCount
|
||||
+ ", incompatible=" + incompatibleClusterStateDiffReceivedCount
|
||||
+ ", compatible=" + compatibleClusterStateDiffReceivedCount
|
||||
+ ")";
|
||||
}
|
||||
}
|
|
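For reference, a hedged sketch of how the new stats render through toXContent; the XContentFactory plumbing is standard Elasticsearch usage, the field names come from the hunk above, and the counter values are made up:

// Hedged illustration of the rendered stats section.
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
new PublishClusterStateStats(42, 1, 330).toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
// rendered JSON (roughly):
// {
//   "published_cluster_states" : {
//     "full_states" : 42,
//     "incompatible_diffs" : 1,
//     "compatible_diffs" : 330
//   }
// }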
@ -412,8 +412,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
|
|||
|
||||
@Override
|
||||
public DiscoveryStats stats() {
|
||||
PendingClusterStateStats queueStats = pendingStatesQueue.stats();
|
||||
return new DiscoveryStats(queueStats);
|
||||
return new DiscoveryStats(pendingStatesQueue.stats(), publishClusterState.stats());
|
||||
}
|
||||
|
||||
public DiscoverySettings getDiscoverySettings() {
|
||||
|
|
|
@ -845,6 +845,29 @@ public final class NodeEnvironment implements Closeable {
|
|||
return shardIds;
|
||||
}
|
||||
|
||||
/**
|
||||
* Find all the shards for this index, returning a map of the {@code NodePath} to the number of shards on that path
|
||||
* @param index the index by which to filter shards
|
||||
* @return a map of NodePath to count of the shards for the index on that path
|
||||
* @throws IOException if an IOException occurs
|
||||
*/
|
||||
public Map<NodePath, Long> shardCountPerPath(final Index index) throws IOException {
|
||||
assert index != null;
|
||||
if (nodePaths == null || locks == null) {
|
||||
throw new IllegalStateException("node is not configured to store local location");
|
||||
}
|
||||
assertEnvIsLocked();
|
||||
final Map<NodePath, Long> shardCountPerPath = new HashMap<>();
|
||||
final String indexUniquePathId = index.getUUID();
|
||||
for (final NodePath nodePath : nodePaths) {
|
||||
Path indexLocation = nodePath.indicesPath.resolve(indexUniquePathId);
|
||||
if (Files.isDirectory(indexLocation)) {
|
||||
shardCountPerPath.put(nodePath, (long) findAllShardsForIndex(indexLocation, index).size());
|
||||
}
|
||||
}
|
||||
return shardCountPerPath;
|
||||
}
|
||||
|
||||
private static Set<ShardId> findAllShardsForIndex(Path indexPath, Index index) throws IOException {
|
||||
assert indexPath.getFileName().toString().equals(index.getUUID());
|
||||
Set<ShardId> shardIds = new HashSet<>();
|
||||
|
|
|
@ -708,12 +708,11 @@ public abstract class Engine implements Closeable {
|
|||
protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
|
||||
ensureOpen();
|
||||
Map<String, Segment> segments = new HashMap<>();
|
||||
|
||||
// first, go over and compute the search ones...
|
||||
Searcher searcher = acquireSearcher("segments");
|
||||
try {
|
||||
try (Searcher searcher = acquireSearcher("segments")){
|
||||
for (LeafReaderContext reader : searcher.reader().leaves()) {
|
||||
SegmentCommitInfo info = segmentReader(reader.reader()).getSegmentInfo();
|
||||
final SegmentReader segmentReader = segmentReader(reader.reader());
|
||||
SegmentCommitInfo info = segmentReader.getSegmentInfo();
|
||||
assert !segments.containsKey(info.info.name);
|
||||
Segment segment = new Segment(info.info.name);
|
||||
segment.search = true;
|
||||
|
@ -726,7 +725,6 @@ public abstract class Engine implements Closeable {
|
|||
} catch (IOException e) {
|
||||
logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
|
||||
}
|
||||
final SegmentReader segmentReader = segmentReader(reader.reader());
|
||||
segment.memoryInBytes = segmentReader.ramBytesUsed();
|
||||
segment.segmentSort = info.info.getIndexSort();
|
||||
if (verbose) {
|
||||
|
@ -736,8 +734,6 @@ public abstract class Engine implements Closeable {
|
|||
// TODO: add more fine grained mem stats values to per segment info here
|
||||
segments.put(info.info.name, segment);
|
||||
}
|
||||
} finally {
|
||||
searcher.close();
|
||||
}
|
||||
|
||||
// now, correlate or add the committed ones...
|
||||
|
|
|
@ -82,7 +82,7 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten
|
|||
}
|
||||
|
||||
/**
|
||||
* Does the document exists.
|
||||
* Does the document exist.
|
||||
*/
|
||||
public boolean isExists() {
|
||||
return exists;
|
||||
|
|
|
@ -43,6 +43,8 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.elasticsearch.common.lucene.search.Queries.newLenientFieldQuery;
|
||||
|
||||
public class MultiMatchQuery extends MatchQuery {
|
||||
|
||||
private Float groupTieBreaker = null;
|
||||
|
@ -204,7 +206,7 @@ public class MultiMatchQuery extends MatchQuery {
|
|||
for (int i = 0; i < terms.length; i++) {
|
||||
values[i] = terms[i].bytes();
|
||||
}
|
||||
return MultiMatchQuery.blendTerms(context, values, commonTermsCutoff, tieBreaker, blendedFields);
|
||||
return MultiMatchQuery.blendTerms(context, values, commonTermsCutoff, tieBreaker, lenient, blendedFields);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -212,7 +214,7 @@ public class MultiMatchQuery extends MatchQuery {
|
|||
if (blendedFields == null) {
|
||||
return super.blendTerm(term, fieldType);
|
||||
}
|
||||
return MultiMatchQuery.blendTerm(context, term.bytes(), commonTermsCutoff, tieBreaker, blendedFields);
|
||||
return MultiMatchQuery.blendTerm(context, term.bytes(), commonTermsCutoff, tieBreaker, lenient, blendedFields);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -227,12 +229,12 @@ public class MultiMatchQuery extends MatchQuery {
|
|||
}
|
||||
|
||||
static Query blendTerm(QueryShardContext context, BytesRef value, Float commonTermsCutoff, float tieBreaker,
|
||||
FieldAndFieldType... blendedFields) {
|
||||
return blendTerms(context, new BytesRef[] {value}, commonTermsCutoff, tieBreaker, blendedFields);
|
||||
boolean lenient, FieldAndFieldType... blendedFields) {
|
||||
return blendTerms(context, new BytesRef[] {value}, commonTermsCutoff, tieBreaker, lenient, blendedFields);
|
||||
}
|
||||
|
||||
static Query blendTerms(QueryShardContext context, BytesRef[] values, Float commonTermsCutoff, float tieBreaker,
|
||||
FieldAndFieldType... blendedFields) {
|
||||
boolean lenient, FieldAndFieldType... blendedFields) {
|
||||
List<Query> queries = new ArrayList<>();
|
||||
Term[] terms = new Term[blendedFields.length * values.length];
|
||||
float[] blendedBoost = new float[blendedFields.length * values.length];
|
||||
|
@ -242,19 +244,12 @@ public class MultiMatchQuery extends MatchQuery {
|
|||
Query query;
|
||||
try {
|
||||
query = ft.fieldType.termQuery(term, context);
|
||||
} catch (IllegalArgumentException e) {
|
||||
// the query expects a certain class of values such as numbers
|
||||
// or IP addresses and the value can't be parsed, so ignore this
|
||||
// field
|
||||
continue;
|
||||
} catch (ElasticsearchParseException parseException) {
|
||||
// date fields throw an ElasticsearchParseException with the
|
||||
// underlying IAE as the cause, ignore this field if that is
|
||||
// the case
|
||||
if (parseException.getCause() instanceof IllegalArgumentException) {
|
||||
continue;
|
||||
} catch (RuntimeException e) {
|
||||
if (lenient) {
|
||||
query = newLenientFieldQuery(ft.fieldType.name(), e);
|
||||
} else {
|
||||
throw e;
|
||||
}
|
||||
throw parseException;
|
||||
}
|
||||
float boost = ft.boost;
|
||||
while (query instanceof BoostQuery) {
|
||||
|
|
|
@ -180,12 +180,19 @@ public final class ShardSearchStats implements SearchOperationListener {
|
|||
public void onFreeScrollContext(SearchContext context) {
|
||||
totalStats.scrollCurrent.dec();
|
||||
assert totalStats.scrollCurrent.count() >= 0;
|
||||
totalStats.scrollMetric.inc(System.nanoTime() - context.getOriginNanoTime());
|
||||
totalStats.scrollMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - context.getOriginNanoTime()));
|
||||
}
|
||||
|
||||
static final class StatsHolder {
|
||||
public final MeanMetric queryMetric = new MeanMetric();
|
||||
public final MeanMetric fetchMetric = new MeanMetric();
|
||||
/* We store scroll statistics in microseconds because with nanoseconds we run the risk of overflowing the total stats if there are
|
||||
* many scrolls. For example, if 2^24 scrolls have been executed, each running for 2^10 seconds, then using
|
||||
* nanoseconds would require a numeric representation that can represent at least 2^24 * 2^10 * 10^9 > 2^24 * 2^10 * 2^29 = 2^63
|
||||
* which exceeds the largest value that can be represented by a long. By using microseconds, we enable capturing one-thousand
|
||||
* times as many scrolls (i.e., billions of scrolls which at one per second would take 32 years to occur), or scrolls that execute
|
||||
* for one-thousand times as long (i.e., scrolls that execute for almost twelve days on average).
|
||||
*/
|
||||
public final MeanMetric scrollMetric = new MeanMetric();
|
||||
public final MeanMetric suggestMetric = new MeanMetric();
|
||||
public final CounterMetric queryCurrent = new CounterMetric();
|
||||
|
@ -197,7 +204,7 @@ public final class ShardSearchStats implements SearchOperationListener {
|
|||
return new SearchStats.Stats(
|
||||
queryMetric.count(), TimeUnit.NANOSECONDS.toMillis(queryMetric.sum()), queryCurrent.count(),
|
||||
fetchMetric.count(), TimeUnit.NANOSECONDS.toMillis(fetchMetric.sum()), fetchCurrent.count(),
|
||||
scrollMetric.count(), TimeUnit.NANOSECONDS.toMillis(scrollMetric.sum()), scrollCurrent.count(),
|
||||
scrollMetric.count(), TimeUnit.MICROSECONDS.toMillis(scrollMetric.sum()), scrollCurrent.count(),
|
||||
suggestMetric.count(), TimeUnit.NANOSECONDS.toMillis(suggestMetric.sum()), suggestCurrent.count()
|
||||
);
|
||||
}
|
||||
|
|
|
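The overflow bound in the comment above can be checked with plain JDK arithmetic; a small standalone sketch (not Elasticsearch code):

import java.math.BigInteger;

// Standalone check of the reasoning above: 2^24 scrolls of 2^10 seconds each overflow a long
// when summed in nanoseconds, but not when summed in microseconds.
public class ScrollMetricOverflowCheck {
    public static void main(String[] args) {
        BigInteger scrolls = BigInteger.valueOf(2).pow(24);
        BigInteger secondsEach = BigInteger.valueOf(2).pow(10);
        BigInteger totalNanos = scrolls.multiply(secondsEach).multiply(BigInteger.valueOf(1_000_000_000L));
        BigInteger totalMicros = scrolls.multiply(secondsEach).multiply(BigInteger.valueOf(1_000_000L));
        BigInteger maxLong = BigInteger.valueOf(Long.MAX_VALUE); // 2^63 - 1

        System.out.println("nanoseconds fit in a long?  " + (totalNanos.compareTo(maxLong) <= 0));  // false
        System.out.println("microseconds fit in a long? " + (totalMicros.compareTo(maxLong) <= 0)); // true
    }
}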
@ -19,11 +19,13 @@
|
|||
|
||||
package org.elasticsearch.index.shard;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.store.StoreStats;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
|
@ -31,22 +33,25 @@ public class DocsStats implements Streamable, ToXContentFragment {
|
|||
|
||||
long count = 0;
|
||||
long deleted = 0;
|
||||
long totalSizeInBytes = 0;
|
||||
|
||||
public DocsStats() {
|
||||
|
||||
}
|
||||
|
||||
public DocsStats(long count, long deleted) {
|
||||
public DocsStats(long count, long deleted, long totalSizeInBytes) {
|
||||
this.count = count;
|
||||
this.deleted = deleted;
|
||||
this.totalSizeInBytes = totalSizeInBytes;
|
||||
}
|
||||
|
||||
public void add(DocsStats docsStats) {
|
||||
if (docsStats == null) {
|
||||
public void add(DocsStats other) {
|
||||
if (other == null) {
|
||||
return;
|
||||
}
|
||||
count += docsStats.count;
|
||||
deleted += docsStats.deleted;
|
||||
this.totalSizeInBytes += other.totalSizeInBytes;
|
||||
this.count += other.count;
|
||||
this.deleted += other.deleted;
|
||||
}
|
||||
|
||||
public long getCount() {
|
||||
|
@ -57,16 +62,40 @@ public class DocsStats implements Streamable, ToXContentFragment {
|
|||
return this.deleted;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the total size in bytes of all documents covered by these stats.
|
||||
* This value may be more reliable than {@link StoreStats#getSizeInBytes()} in estimating the index size.
|
||||
*/
|
||||
public long getTotalSizeInBytes() {
|
||||
return totalSizeInBytes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the average size in bytes of all documents covered by these stats.
|
||||
*/
|
||||
public long getAverageSizeInBytes() {
|
||||
long totalDocs = count + deleted;
|
||||
return totalDocs == 0 ? 0 : totalSizeInBytes / totalDocs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
count = in.readVLong();
|
||||
deleted = in.readVLong();
|
||||
if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
|
||||
totalSizeInBytes = in.readVLong();
|
||||
} else {
|
||||
totalSizeInBytes = -1;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVLong(count);
|
||||
out.writeVLong(deleted);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
|
||||
out.writeVLong(totalSizeInBytes);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
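A small usage sketch of the new accessors (the numbers are made up, not from the commit); note that the average is taken over live plus deleted documents:

DocsStats stats = new DocsStats(900, 100, 50_000_000L); // 900 live docs, 100 deleted, ~50 MB of segment data
long total = stats.getTotalSizeInBytes();                // 50_000_000
long average = stats.getAverageSizeInBytes();            // 50_000_000 / (900 + 100) = 50_000 bytes per document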
@@ -879,9 +879,18 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}

public DocsStats docStats() {
try (Engine.Searcher searcher = acquireSearcher("doc_stats")) {
return new DocsStats(searcher.reader().numDocs(), searcher.reader().numDeletedDocs());
long numDocs = 0;
long numDeletedDocs = 0;
long sizeInBytes = 0;
List<Segment> segments = segments(false);
for (Segment segment : segments) {
if (segment.search) {
numDocs += segment.getNumDocs();
numDeletedDocs += segment.getDeletedDocs();
sizeInBytes += segment.getSizeInBytes();
}
}
return new DocsStats(numDocs, numDeletedDocs, sizeInBytes);
}

/**
@@ -31,7 +31,11 @@ import java.math.BigInteger;
import java.nio.file.FileStore;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public final class ShardPath {
public static final String INDEX_FOLDER_NAME = "index";
@ -189,23 +193,49 @@ public final class ShardPath {
|
|||
|
||||
// TODO - do we need something more extensible? Yet, this does the job for now...
|
||||
final NodeEnvironment.NodePath[] paths = env.nodePaths();
|
||||
NodeEnvironment.NodePath bestPath = null;
|
||||
BigInteger maxUsableBytes = BigInteger.valueOf(Long.MIN_VALUE);
|
||||
for (NodeEnvironment.NodePath nodePath : paths) {
|
||||
FileStore fileStore = nodePath.fileStore;
|
||||
|
||||
BigInteger usableBytes = BigInteger.valueOf(fileStore.getUsableSpace());
|
||||
assert usableBytes.compareTo(BigInteger.ZERO) >= 0;
|
||||
// If no better path is chosen, use the one with the most space by default
|
||||
NodeEnvironment.NodePath bestPath = getPathWithMostFreeSpace(env);
|
||||
|
||||
// Deduct estimated reserved bytes from usable space:
|
||||
Integer count = dataPathToShardCount.get(nodePath.path);
|
||||
if (count != null) {
|
||||
usableBytes = usableBytes.subtract(estShardSizeInBytes.multiply(BigInteger.valueOf(count)));
|
||||
}
|
||||
if (bestPath == null || usableBytes.compareTo(maxUsableBytes) > 0) {
|
||||
maxUsableBytes = usableBytes;
|
||||
bestPath = nodePath;
|
||||
if (paths.length != 1) {
|
||||
int shardCount = indexSettings.getNumberOfShards();
|
||||
// Maximum number of shards that a path should have for a particular index assuming
|
||||
// all the shards were assigned to this node. For example, with a node with 4 data
|
||||
// paths and an index with 9 primary shards, the maximum number of shards per path
|
||||
// would be 3.
|
||||
int maxShardsPerPath = Math.floorDiv(shardCount, paths.length) + ((shardCount % paths.length) == 0 ? 0 : 1);
|
||||
|
||||
Map<NodeEnvironment.NodePath, Long> pathToShardCount = env.shardCountPerPath(shardId.getIndex());
|
||||
|
||||
// Compute how much space there is on each path
|
||||
final Map<NodeEnvironment.NodePath, BigInteger> pathsToSpace = new HashMap<>(paths.length);
|
||||
for (NodeEnvironment.NodePath nodePath : paths) {
|
||||
FileStore fileStore = nodePath.fileStore;
|
||||
BigInteger usableBytes = BigInteger.valueOf(fileStore.getUsableSpace());
|
||||
pathsToSpace.put(nodePath, usableBytes);
|
||||
}
|
||||
|
||||
bestPath = Arrays.stream(paths)
|
||||
// Filter out paths that have enough space
|
||||
.filter((path) -> pathsToSpace.get(path).subtract(estShardSizeInBytes).compareTo(BigInteger.ZERO) > 0)
|
||||
// Sort by the number of shards for this index
|
||||
.sorted((p1, p2) -> {
|
||||
int cmp = Long.compare(pathToShardCount.getOrDefault(p1, 0L), pathToShardCount.getOrDefault(p2, 0L));
|
||||
if (cmp == 0) {
|
||||
// if the number of shards is equal, tie-break with the number of total shards
|
||||
cmp = Integer.compare(dataPathToShardCount.getOrDefault(p1.path, 0),
|
||||
dataPathToShardCount.getOrDefault(p2.path, 0));
|
||||
if (cmp == 0) {
|
||||
// if the number of shards is equal, tie-break with the usable bytes
|
||||
cmp = pathsToSpace.get(p2).compareTo(pathsToSpace.get(p1));
|
||||
}
|
||||
}
|
||||
return cmp;
|
||||
})
|
||||
// Return the first result
|
||||
.findFirst()
|
||||
// Or the existing best path if there aren't any that fit the criteria
|
||||
.orElse(bestPath);
|
||||
}
|
||||
|
||||
statePath = bestPath.resolve(shardId);
|
||||
|
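The hunk above first caps how many shards of the index a single data path should receive; a quick check of that arithmetic (illustrative only), matching the example in the code comment:

int shardCount = 9;  // primary shards in the index, as in the comment's example
int pathCount = 4;   // node data paths
int maxShardsPerPath = Math.floorDiv(shardCount, pathCount) + ((shardCount % pathCount) == 0 ? 0 : 1);
// 9 / 4 = 2 with a non-zero remainder, so maxShardsPerPath == 3

Paths with enough usable space are then preferred in ascending order of shards of this index on the path, tie-broken first by total shards and then by usable bytes, falling back to the path with the most free space when nothing qualifies.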
@@ -214,6 +244,24 @@ public final class ShardPath {
return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, shardId);
}

static NodeEnvironment.NodePath getPathWithMostFreeSpace(NodeEnvironment env) throws IOException {
final NodeEnvironment.NodePath[] paths = env.nodePaths();
NodeEnvironment.NodePath bestPath = null;
long maxUsableBytes = Long.MIN_VALUE;
for (NodeEnvironment.NodePath nodePath : paths) {
FileStore fileStore = nodePath.fileStore;
long usableBytes = fileStore.getUsableSpace();
assert usableBytes >= 0 : "usable bytes must be >= 0, got: " + usableBytes;

if (bestPath == null || usableBytes > maxUsableBytes) {
// This path has been determined to be "better" based on the usable bytes
maxUsableBytes = usableBytes;
bestPath = nodePath;
}
}
return bestPath;
}

@Override
public boolean equals(Object o) {
if (this == o) {
@@ -36,7 +36,7 @@ import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.IndexingOperationListener;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPool.Cancellable;
import org.elasticsearch.threadpool.Scheduler.Cancellable;
import org.elasticsearch.threadpool.ThreadPool.Names;

import java.io.Closeable;
@@ -53,7 +53,7 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE_SETTING =
Setting.memorySizeSetting("indices.queries.cache.size", "10%", Property.NodeScope);
public static final Setting<Integer> INDICES_CACHE_QUERY_COUNT_SETTING =
Setting.intSetting("indices.queries.cache.count", 10000, 1, Property.NodeScope);
Setting.intSetting("indices.queries.cache.count", 1000, 1, Property.NodeScope);
// enables caching on all segments instead of only the larger ones, for testing only
public static final Setting<Boolean> INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING =
Setting.boolSetting("indices.queries.cache.all_segments", false, Property.NodeScope);
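With the default lowered to 1000, a deployment that relied on the old value can still raise it explicitly. A hedged sketch (the builder call is illustrative; the setting key and constant come from the lines above):

Settings settings = Settings.builder()
        .put("indices.queries.cache.count", 10000) // e.g. restore the previous default
        .build();
int cachedQueryCount = IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.get(settings);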
@@ -21,6 +21,7 @@ package org.elasticsearch.ingest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.common.Strings;

@@ -81,17 +82,21 @@ public class PipelineExecutionService implements ClusterStateApplier {
@Override
protected void doRun() throws Exception {
for (DocWriteRequest actionRequest : actionRequests) {
if ((actionRequest instanceof IndexRequest)) {
IndexRequest indexRequest = (IndexRequest) actionRequest;
if (Strings.hasText(indexRequest.getPipeline())) {
try {
innerExecute(indexRequest, getPipeline(indexRequest.getPipeline()));
//this shouldn't be needed here but we do it for consistency with index api
// which requires it to prevent double execution
indexRequest.setPipeline(null);
} catch (Exception e) {
itemFailureHandler.accept(indexRequest, e);
}
IndexRequest indexRequest = null;
if (actionRequest instanceof IndexRequest) {
indexRequest = (IndexRequest) actionRequest;
} else if (actionRequest instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) actionRequest;
indexRequest = updateRequest.docAsUpsert() ? updateRequest.doc() : updateRequest.upsertRequest();
}
if (indexRequest != null && Strings.hasText(indexRequest.getPipeline())) {
try {
innerExecute(indexRequest, getPipeline(indexRequest.getPipeline()));
//this shouldn't be needed here but we do it for consistency with index api
// which requires it to prevent double execution
indexRequest.setPipeline(null);
} catch (Exception e) {
itemFailureHandler.accept(indexRequest, e);
}
}
}
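The new branch above means an ingest pipeline is now also resolved for update requests. A minimal sketch of that resolution (the request construction is hypothetical; the accessors are the ones used in doRun() above):

UpdateRequest update = new UpdateRequest("my-index", "my-type", "1")
        .doc(Collections.singletonMap("field", "value"))
        .docAsUpsert(true);
// doc-as-upsert updates contribute their doc, plain upserts contribute the upsert document
IndexRequest pipelineSource = update.docAsUpsert() ? update.doc() : update.upsertRequest();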
@@ -28,7 +28,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.monitor.jvm.JvmStats.GarbageCollector;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPool.Cancellable;
import org.elasticsearch.threadpool.Scheduler.Cancellable;
import org.elasticsearch.threadpool.ThreadPool.Names;

import java.util.HashMap;
@ -0,0 +1,108 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.node;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.common.xcontent.ToXContent.Params;
|
||||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Class representing statistics about adaptive replica selection. This includes
|
||||
* EWMA of queue size, service time, and response time, as well as outgoing
|
||||
* searches to each node and the "rank" based on the ARS formula.
|
||||
*/
|
||||
public class AdaptiveSelectionStats implements Writeable, ToXContentFragment {
|
||||
|
||||
private final Map<String, Long> clientOutgoingConnections;
|
||||
private final Map<String, ResponseCollectorService.ComputedNodeStats> nodeComputedStats;
|
||||
|
||||
public AdaptiveSelectionStats(Map<String, Long> clientConnections,
|
||||
Map<String, ResponseCollectorService.ComputedNodeStats> nodeComputedStats) {
|
||||
this.clientOutgoingConnections = clientConnections;
|
||||
this.nodeComputedStats = nodeComputedStats;
|
||||
}
|
||||
|
||||
public AdaptiveSelectionStats(StreamInput in) throws IOException {
|
||||
this.clientOutgoingConnections = in.readMap(StreamInput::readString, StreamInput::readLong);
|
||||
this.nodeComputedStats = in.readMap(StreamInput::readString, ResponseCollectorService.ComputedNodeStats::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeMap(this.clientOutgoingConnections, StreamOutput::writeString, StreamOutput::writeLong);
|
||||
out.writeMap(this.nodeComputedStats, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream));
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject("adaptive_selection");
|
||||
Set<String> allNodeIds = Sets.union(clientOutgoingConnections.keySet(), nodeComputedStats.keySet());
|
||||
for (String nodeId : allNodeIds) {
|
||||
builder.startObject(nodeId);
|
||||
ResponseCollectorService.ComputedNodeStats stats = nodeComputedStats.get(nodeId);
|
||||
if (stats != null) {
|
||||
long outgoingSearches = clientOutgoingConnections.getOrDefault(nodeId, 0L);
|
||||
builder.field("outgoing_searches", outgoingSearches);
|
||||
builder.field("avg_queue_size", stats.queueSize);
|
||||
builder.timeValueField("avg_service_time_ns", "avg_service_time", (long) stats.serviceTime, TimeUnit.NANOSECONDS);
|
||||
builder.timeValueField("avg_response_time_ns", "avg_response_time", (long) stats.responseTime, TimeUnit.NANOSECONDS);
|
||||
builder.field("rank", String.format(Locale.ROOT, "%.1f", stats.rank(outgoingSearches)));
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map of node id to the outgoing search requests to that node
|
||||
*/
|
||||
public Map<String, Long> getOutgoingConnections() {
|
||||
return clientOutgoingConnections;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map of node id to the computed stats
|
||||
*/
|
||||
public Map<String, ResponseCollectorService.ComputedNodeStats> getComputedStats() {
|
||||
return nodeComputedStats;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map of node id to the ranking of the nodes based on the adaptive replica formula
|
||||
*/
|
||||
public Map<String, Double> getRanks() {
|
||||
return nodeComputedStats.entrySet().stream()
|
||||
.collect(Collectors.toMap(Map.Entry::getKey,
|
||||
e -> e.getValue().rank(clientOutgoingConnections.getOrDefault(e.getKey(), 0L))));
|
||||
}
|
||||
}
|
|
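A sketch of how a caller might consume the new stats object (the surrounding variables are hypothetical; the accessors are the ones defined in AdaptiveSelectionStats above):

AdaptiveSelectionStats stats =
        responseCollectorService.getAdaptiveStats(searchTransportService.getClientConnections());
Map<String, Long> outgoing = stats.getOutgoingConnections(); // node id -> outstanding search requests to that node
Map<String, Double> ranks = stats.getRanks();                // node id -> rank from the adaptive replica selection formula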
@@ -451,7 +451,8 @@ public class Node implements Closeable {
clusterModule.getAllocationService());
this.nodeService = new NodeService(settings, threadPool, monitorService, discoveryModule.getDiscovery(),
transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(),
httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter());
httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter(), responseCollectorService,
searchTransportService);
modules.add(b -> {
b.bind(Node.class).toInstance(this);
b.bind(NodeService.class).toInstance(nodeService);
@ -25,6 +25,7 @@ import org.elasticsearch.Version;
|
|||
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
|
||||
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
|
||||
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
|
||||
import org.elasticsearch.action.search.SearchTransportService;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
|
@ -36,6 +37,7 @@ import org.elasticsearch.indices.IndicesService;
|
|||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.ingest.IngestService;
|
||||
import org.elasticsearch.monitor.MonitorService;
|
||||
import org.elasticsearch.node.ResponseCollectorService;
|
||||
import org.elasticsearch.plugins.PluginsService;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
@ -54,17 +56,19 @@ public class NodeService extends AbstractComponent implements Closeable {
|
|||
private final CircuitBreakerService circuitBreakerService;
|
||||
private final IngestService ingestService;
|
||||
private final SettingsFilter settingsFilter;
|
||||
private ScriptService scriptService;
|
||||
private final ScriptService scriptService;
|
||||
private final HttpServerTransport httpServerTransport;
|
||||
|
||||
private final ResponseCollectorService responseCollectorService;
|
||||
private final SearchTransportService searchTransportService;
|
||||
|
||||
private final Discovery discovery;
|
||||
|
||||
NodeService(Settings settings, ThreadPool threadPool, MonitorService monitorService, Discovery discovery,
|
||||
TransportService transportService, IndicesService indicesService, PluginsService pluginService,
|
||||
CircuitBreakerService circuitBreakerService, ScriptService scriptService,
|
||||
@Nullable HttpServerTransport httpServerTransport, IngestService ingestService, ClusterService clusterService,
|
||||
SettingsFilter settingsFilter) {
|
||||
TransportService transportService, IndicesService indicesService, PluginsService pluginService,
|
||||
CircuitBreakerService circuitBreakerService, ScriptService scriptService,
|
||||
@Nullable HttpServerTransport httpServerTransport, IngestService ingestService, ClusterService clusterService,
|
||||
SettingsFilter settingsFilter, ResponseCollectorService responseCollectorService,
|
||||
SearchTransportService searchTransportService) {
|
||||
super(settings);
|
||||
this.threadPool = threadPool;
|
||||
this.monitorService = monitorService;
|
||||
|
@ -77,6 +81,8 @@ public class NodeService extends AbstractComponent implements Closeable {
|
|||
this.ingestService = ingestService;
|
||||
this.settingsFilter = settingsFilter;
|
||||
this.scriptService = scriptService;
|
||||
this.responseCollectorService = responseCollectorService;
|
||||
this.searchTransportService = searchTransportService;
|
||||
clusterService.addStateApplier(ingestService.getPipelineStore());
|
||||
clusterService.addStateApplier(ingestService.getPipelineExecutionService());
|
||||
}
|
||||
|
@@ -99,7 +105,7 @@ public class NodeService extends AbstractComponent implements Closeable {
public NodeStats stats(CommonStatsFlags indices, boolean os, boolean process, boolean jvm, boolean threadPool,
boolean fs, boolean transport, boolean http, boolean circuitBreaker,
boolean script, boolean discoveryStats, boolean ingest) {
boolean script, boolean discoveryStats, boolean ingest, boolean adaptiveSelection) {
// for indices stats we want to include previous allocated shards stats as well (it will
// only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats)
return new NodeStats(transportService.getLocalNode(), System.currentTimeMillis(),

@@ -114,7 +120,8 @@ public class NodeService extends AbstractComponent implements Closeable {
circuitBreaker ? circuitBreakerService.stats() : null,
script ? scriptService.stats() : null,
discoveryStats ? discovery.stats() : null,
ingest ? ingestService.getPipelineExecutionService().stats() : null
ingest ? ingestService.getPipelineExecutionService().stats() : null,
adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getClientConnections()) : null
);
}
@ -26,9 +26,13 @@ import org.elasticsearch.cluster.service.ClusterService;
|
|||
import org.elasticsearch.common.ExponentiallyWeightedMovingAverage;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Locale;
|
||||
|
@@ -91,6 +95,10 @@ public final class ResponseCollectorService extends AbstractComponent implements
return nodeStats;
}

public AdaptiveSelectionStats getAdaptiveStats(Map<String, Long> clientSearchConnections) {
return new AdaptiveSelectionStats(clientSearchConnections, getAllNodeStatistics());
}

/**
 * Optionally return a {@code NodeStatistics} for the given nodeid, if
 * response information exists for the given node. Returns an empty
@ -106,7 +114,7 @@ public final class ResponseCollectorService extends AbstractComponent implements
|
|||
* node's statistics. This includes the EWMA of queue size, response time,
|
||||
* and service time.
|
||||
*/
|
||||
public static class ComputedNodeStats {
|
||||
public static class ComputedNodeStats implements Writeable {
|
||||
// We store timestamps with nanosecond precision, however, the
|
||||
// formula specifies milliseconds, therefore we need to convert
|
||||
// the values so the times don't unduely weight the formula
|
||||
|
@ -120,12 +128,34 @@ public final class ResponseCollectorService extends AbstractComponent implements
|
|||
public final double responseTime;
|
||||
public final double serviceTime;
|
||||
|
||||
ComputedNodeStats(int clientNum, NodeStatistics nodeStats) {
|
||||
public ComputedNodeStats(String nodeId, int clientNum, int queueSize, double responseTime, double serviceTime) {
|
||||
this.nodeId = nodeId;
|
||||
this.clientNum = clientNum;
|
||||
this.nodeId = nodeStats.nodeId;
|
||||
this.queueSize = (int) nodeStats.queueSize.getAverage();
|
||||
this.responseTime = nodeStats.responseTime.getAverage();
|
||||
this.serviceTime = nodeStats.serviceTime;
|
||||
this.queueSize = queueSize;
|
||||
this.responseTime = responseTime;
|
||||
this.serviceTime = serviceTime;
|
||||
}
|
||||
|
||||
ComputedNodeStats(int clientNum, NodeStatistics nodeStats) {
|
||||
this(nodeStats.nodeId, clientNum,
|
||||
(int) nodeStats.queueSize.getAverage(), nodeStats.responseTime.getAverage(), nodeStats.serviceTime);
|
||||
}
|
||||
|
||||
ComputedNodeStats(StreamInput in) throws IOException {
|
||||
this.nodeId = in.readString();
|
||||
this.clientNum = in.readInt();
|
||||
this.queueSize = in.readInt();
|
||||
this.responseTime = in.readDouble();
|
||||
this.serviceTime = in.readDouble();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(this.nodeId);
|
||||
out.writeInt(this.clientNum);
|
||||
out.writeInt(this.queueSize);
|
||||
out.writeDouble(this.responseTime);
|
||||
out.writeDouble(this.serviceTime);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -133,9 +163,9 @@ public final class ResponseCollectorService extends AbstractComponent implements
 * https://www.usenix.org/system/files/conference/nsdi15/nsdi15-paper-suresh.pdf
 */
private double innerRank(long outstandingRequests) {
// this is a placeholder value, the concurrency compensation is
// defined as the number of outstanding requests from the client
// to the node times the number of clients in the system
// the concurrency compensation is defined as the number of
// outstanding requests from the client to the node times the number
// of clients in the system
double concurrencyCompensation = outstandingRequests * clientNum;

// Cubic queue adjustment factor. The paper chose 3 though we could
@ -34,6 +34,7 @@ import org.elasticsearch.common.collect.Tuple;
|
|||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.component.LifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Module;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
|
@ -326,6 +327,9 @@ public class PluginsService extends AbstractComponent {
|
|||
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsDirectory)) {
|
||||
for (Path plugin : stream) {
|
||||
if (FileSystemUtils.isDesktopServicesStore(plugin)) {
|
||||
continue;
|
||||
}
|
||||
logger.trace("--- adding plugin [{}]", plugin.toAbsolutePath());
|
||||
final PluginInfo info;
|
||||
try {
|
||||
|
|
|
@@ -36,6 +36,7 @@ public class AcknowledgedRestListener<T extends AcknowledgedResponse> extends Re
@Override
public RestResponse buildResponse(T response, XContentBuilder builder) throws Exception {
// TODO - Once AcknowledgedResponse implements ToXContent, this method should be updated to call response.toXContent.
builder.startObject()
.field(Fields.ACKNOWLEDGED, response.isAcknowledged());
addCustomFields(builder, response);
@@ -93,7 +93,7 @@ import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPool.Cancellable;
import org.elasticsearch.threadpool.Scheduler.Cancellable;
import org.elasticsearch.threadpool.ThreadPool.Names;
import org.elasticsearch.transport.TransportRequest;
@@ -301,8 +301,6 @@ public class FetchPhase implements SearchPhase {
}
context.lookup().source().setSource(nestedSourceAsMap);
XContentType contentType = tuple.v1();
BytesReference nestedSource = contentBuilder(contentType).map(nestedSourceAsMap).bytes();
context.lookup().source().setSource(nestedSource);
context.lookup().source().setSourceContentType(contentType);
}
return new SearchHit(nestedTopDocId, uid.id(), documentMapper.typeText(), nestedIdentity, searchFields);
@ -20,13 +20,20 @@
|
|||
package org.elasticsearch.search.fetch.subphase;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.search.fetch.FetchSubPhase;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.search.lookup.SourceLookup;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.UncheckedIOException;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.contentBuilder;
|
||||
|
||||
public final class FetchSourceSubPhase implements FetchSubPhase {
|
||||
|
||||
|
@ -35,22 +42,27 @@ public final class FetchSourceSubPhase implements FetchSubPhase {
|
|||
if (context.sourceRequested() == false) {
|
||||
return;
|
||||
}
|
||||
final boolean nestedHit = hitContext.hit().getNestedIdentity() != null;
|
||||
SourceLookup source = context.lookup().source();
|
||||
FetchSourceContext fetchSourceContext = context.fetchSourceContext();
|
||||
assert fetchSourceContext.fetchSource();
|
||||
if (fetchSourceContext.includes().length == 0 && fetchSourceContext.excludes().length == 0) {
|
||||
hitContext.hit().sourceRef(source.internalSourceRef());
|
||||
return;
|
||||
if (nestedHit == false) {
|
||||
if (fetchSourceContext.includes().length == 0 && fetchSourceContext.excludes().length == 0) {
|
||||
hitContext.hit().sourceRef(source.internalSourceRef());
|
||||
return;
|
||||
}
|
||||
if (source.internalSourceRef() == null) {
|
||||
throw new IllegalArgumentException("unable to fetch fields from _source field: _source is disabled in the mappings " +
|
||||
"for index [" + context.indexShard().shardId().getIndexName() + "]");
|
||||
}
|
||||
}
|
||||
|
||||
if (source.internalSourceRef() == null) {
|
||||
throw new IllegalArgumentException("unable to fetch fields from _source field: _source is disabled in the mappings " +
|
||||
"for index [" + context.indexShard().shardId().getIndexName() + "]");
|
||||
Object value = source.filter(fetchSourceContext);
|
||||
if (nestedHit) {
|
||||
value = getNestedSource((Map<String, Object>) value, hitContext);
|
||||
}
|
||||
|
||||
final Object value = source.filter(fetchSourceContext);
|
||||
try {
|
||||
final int initialCapacity = Math.min(1024, source.internalSourceRef().length());
|
||||
final int initialCapacity = nestedHit ? 1024 : Math.min(1024, source.internalSourceRef().length());
|
||||
BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity);
|
||||
XContentBuilder builder = new XContentBuilder(source.sourceContentType().xContent(), streamOutput);
|
||||
builder.value(value);
|
||||
|
@@ -58,6 +70,12 @@ public final class FetchSourceSubPhase implements FetchSubPhase {
} catch (IOException e) {
throw new ElasticsearchException("Error filtering source", e);
}
}

private Map<String, Object> getNestedSource(Map<String, Object> sourceAsMap, HitContext hitContext) {
for (SearchHit.NestedIdentity o = hitContext.hit().getNestedIdentity(); o != null; o = o.getChild()) {
sourceAsMap = (Map<String, Object>) sourceAsMap.get(o.getField().string());
}
return sourceAsMap;
}
}
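For a nested hit, getNestedSource narrows the filtered source map one level per nested identity. A tiny runnable sketch with hypothetical field names (not taken from the commit):

import java.util.HashMap;
import java.util.Map;

class NestedSourceWalkSketch {
    public static void main(String[] args) {
        Map<String, Object> reply = new HashMap<>();
        reply.put("text", "hello");
        Map<String, Object> comments = new HashMap<>();
        comments.put("replies", reply);
        Map<String, Object> source = new HashMap<>();
        source.put("comments", comments);
        // a hit nested under "comments" and then "replies" walks two levels, as the loop above does
        Map<String, Object> nested = (Map<String, Object>) ((Map<String, Object>) source.get("comments")).get("replies");
        System.out.println(nested); // {text=hello}
    }
}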
@@ -283,9 +283,10 @@ abstract class TopDocsCollectorContext extends QueryCollectorContext {
return new ScrollingTopDocsCollectorContext(searchContext.scrollContext(),
searchContext.sort(), numDocs, searchContext.trackScores(), searchContext.numberOfShards());
} else if (searchContext.collapse() != null) {
boolean trackScores = searchContext.sort() == null ? true : searchContext.trackScores();
int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs);
return new CollapsingTopDocsCollectorContext(searchContext.collapse(),
searchContext.sort(), numDocs, searchContext.trackScores());
searchContext.sort(), numDocs, trackScores);
} else {
int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs);
final boolean rescore = searchContext.rescore().isEmpty() == false;
@@ -57,7 +57,10 @@ public abstract class WordScorer {
final long vocSize = terms.getSumTotalTermFreq();
this.vocabluarySize = vocSize == -1 ? reader.maxDoc() : vocSize;
this.useTotalTermFreq = vocSize != -1;
this.numTerms = terms.size();
long numTerms = terms.size();
// -1 cannot be used as value, because scoreUnigram(...) can then divide by 0 if vocabluarySize is 1.
// -1 is returned when terms is a MultiTerms instance.
this.numTerms = vocabluarySize + numTerms > 1 ? numTerms : 0;
this.termsEnum = new FreqTermsEnum(reader, field, !useTotalTermFreq, useTotalTermFreq, null, BigArrays.NON_RECYCLING_INSTANCE); // non recycling for now
this.reader = reader;
this.realWordLikelyhood = realWordLikelyHood;
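The guard above exists for the corner case the comment describes. A quick arithmetic sketch (illustrative; the exact denominator used by scoreUnigram is presumed here, not shown in this hunk):

long vocabluarySize = 1;  // sumTotalTermFreq == -1, so reader.maxDoc() (here 1) is used instead
long numTerms = -1;       // terms.size() returns -1 for a MultiTerms instance
// unguarded, the presumed denominator vocabluarySize + numTerms would be 0
long guardedNumTerms = vocabluarySize + numTerms > 1 ? numTerms : 0; // 0, keeping the denominator at 1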
@ -0,0 +1,209 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.threadpool;
|
||||
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.EsAbortPolicy;
|
||||
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.ScheduledThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
/**
|
||||
* Scheduler that allows to schedule one-shot and periodic commands.
|
||||
*/
|
||||
public interface Scheduler {
|
||||
|
||||
static ScheduledThreadPoolExecutor initScheduler(Settings settings) {
|
||||
ScheduledThreadPoolExecutor scheduler = new ScheduledThreadPoolExecutor(1,
|
||||
EsExecutors.daemonThreadFactory(settings, "scheduler"), new EsAbortPolicy());
|
||||
scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
|
||||
scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
|
||||
scheduler.setRemoveOnCancelPolicy(true);
|
||||
return scheduler;
|
||||
}
|
||||
|
||||
static boolean terminate(ScheduledThreadPoolExecutor scheduledThreadPoolExecutor, long timeout, TimeUnit timeUnit) {
|
||||
scheduledThreadPoolExecutor.shutdown();
|
||||
if (awaitTermination(scheduledThreadPoolExecutor, timeout, timeUnit)) {
|
||||
return true;
|
||||
}
|
||||
// last resort
|
||||
scheduledThreadPoolExecutor.shutdownNow();
|
||||
return awaitTermination(scheduledThreadPoolExecutor, timeout, timeUnit);
|
||||
}
|
||||
|
||||
static boolean awaitTermination(final ScheduledThreadPoolExecutor scheduledThreadPoolExecutor,
|
||||
final long timeout, final TimeUnit timeUnit) {
|
||||
try {
|
||||
if (scheduledThreadPoolExecutor.awaitTermination(timeout, timeUnit)) {
|
||||
return true;
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing by default but can be used by subclasses to save the current thread context and wraps the command in a Runnable
|
||||
* that restores that context before running the command.
|
||||
*/
|
||||
default Runnable preserveContext(Runnable command) {
|
||||
return command;
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedules a one-shot command to be run after a given delay. The command is not run in the context of the calling thread.
|
||||
* To preserve the context of the calling thread you may call {@link #preserveContext(Runnable)} on the runnable before passing
|
||||
* it to this method.
|
||||
* The command runs on scheduler thread. Do not run blocking calls on the scheduler thread. Subclasses may allow
|
||||
* to execute on a different executor, in which case blocking calls are allowed.
|
||||
*
|
||||
* @param delay delay before the task executes
|
||||
* @param executor the name of the executor that has to execute this task. Ignored in the default implementation but can be used
|
||||
* by subclasses that support multiple executors.
|
||||
* @param command the command to run
|
||||
* @return a ScheduledFuture who's get will return when the task has been added to its target thread pool and throws an exception if
|
||||
* the task is canceled before it was added to its target thread pool. Once the task has been added to its target thread pool
|
||||
* the ScheduledFuture cannot interact with it.
|
||||
* @throws EsRejectedExecutionException if the task cannot be scheduled for execution
|
||||
*/
|
||||
ScheduledFuture<?> schedule(TimeValue delay, String executor, Runnable command);
|
||||
|
||||
/**
|
||||
* Schedules a periodic action that runs on scheduler thread. Do not run blocking calls on the scheduler thread. Subclasses may allow
|
||||
* to execute on a different executor, in which case blocking calls are allowed.
|
||||
*
|
||||
* @param command the action to take
|
||||
* @param interval the delay interval
|
||||
* @param executor the name of the executor that has to execute this task. Ignored in the default implementation but can be used
|
||||
* by subclasses that support multiple executors.
|
||||
* @return a {@link Cancellable} that can be used to cancel the subsequent runs of the command. If the command is running, it will
|
||||
* not be interrupted.
|
||||
*/
|
||||
default Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, String executor) {
|
||||
return new ReschedulingRunnable(command, interval, executor, this, (e) -> {}, (e) -> {});
|
||||
}
|
||||
|
||||
/**
|
||||
* This interface represents an object whose execution may be cancelled during runtime.
|
||||
*/
|
||||
interface Cancellable {
|
||||
|
||||
/**
|
||||
* Cancel the execution of this object. This method is idempotent.
|
||||
*/
|
||||
void cancel();
|
||||
|
||||
/**
|
||||
* Check if the execution has been cancelled
|
||||
* @return true if cancelled
|
||||
*/
|
||||
boolean isCancelled();
|
||||
}
|
||||
|
||||
/**
|
||||
* This class encapsulates the scheduling of a {@link Runnable} that needs to be repeated on a interval. For example, checking a value
|
||||
* for cleanup every second could be done by passing in a Runnable that can perform the check and the specified interval between
|
||||
* executions of this runnable. <em>NOTE:</em> the runnable is only rescheduled to run again after completion of the runnable.
|
||||
*
|
||||
* For this class, <i>completion</i> means that the call to {@link Runnable#run()} returned or an exception was thrown and caught. In
|
||||
* case of an exception, this class will log the exception and reschedule the runnable for its next execution. This differs from the
|
||||
* {@link ScheduledThreadPoolExecutor#scheduleWithFixedDelay(Runnable, long, long, TimeUnit)} semantics as an exception there would
|
||||
* terminate the rescheduling of the runnable.
|
||||
*/
|
||||
final class ReschedulingRunnable extends AbstractRunnable implements Cancellable {
|
||||
|
||||
private final Runnable runnable;
|
||||
private final TimeValue interval;
|
||||
private final String executor;
|
||||
private final Scheduler scheduler;
|
||||
private final Consumer<Exception> rejectionConsumer;
|
||||
private final Consumer<Exception> failureConsumer;
|
||||
|
||||
private volatile boolean run = true;
|
||||
|
||||
/**
|
||||
* Creates a new rescheduling runnable and schedules the first execution to occur after the interval specified
|
||||
*
|
||||
* @param runnable the {@link Runnable} that should be executed periodically
|
||||
* @param interval the time interval between executions
|
||||
* @param executor the executor where this runnable should be scheduled to run
|
||||
* @param scheduler the {@link Scheduler} instance to use for scheduling
|
||||
*/
|
||||
ReschedulingRunnable(Runnable runnable, TimeValue interval, String executor, Scheduler scheduler,
|
||||
Consumer<Exception> rejectionConsumer, Consumer<Exception> failureConsumer) {
|
||||
this.runnable = runnable;
|
||||
this.interval = interval;
|
||||
this.executor = executor;
|
||||
this.scheduler = scheduler;
|
||||
this.rejectionConsumer = rejectionConsumer;
|
||||
this.failureConsumer = failureConsumer;
|
||||
scheduler.schedule(interval, executor, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void cancel() {
|
||||
run = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCancelled() {
|
||||
return run == false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void doRun() {
|
||||
// always check run here since this may have been cancelled since the last execution and we do not want to run
|
||||
if (run) {
|
||||
runnable.run();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
failureConsumer.accept(e);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRejection(Exception e) {
|
||||
run = false;
|
||||
rejectionConsumer.accept(e);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onAfter() {
|
||||
// if this has not been cancelled reschedule it to run again
|
||||
if (run) {
|
||||
try {
|
||||
scheduler.schedule(interval, executor, this);
|
||||
} catch (final EsRejectedExecutionException e) {
|
||||
onRejection(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
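A usage sketch of the new interface (the runnable, interval, and surrounding variables are illustrative; ThreadPool becomes the in-tree implementation of Scheduler in the hunks below):

Scheduler scheduler = threadPool;
Scheduler.Cancellable task = scheduler.scheduleWithFixedDelay(
        () -> logger.trace("periodic cleanup"),  // hypothetical task
        TimeValue.timeValueSeconds(30),
        ThreadPool.Names.GENERIC);
// later: stops future runs without interrupting one that is in flight; cancel() is idempotent
task.cancel();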
@ -33,10 +33,7 @@ import org.elasticsearch.common.settings.Setting;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.SizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.EsAbortPolicy;
|
||||
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.common.util.concurrent.XRejectedExecutionHandler;
|
||||
|
@ -64,7 +61,7 @@ import java.util.concurrent.TimeUnit;
|
|||
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
|
||||
public class ThreadPool extends AbstractComponent implements Closeable {
|
||||
public class ThreadPool extends AbstractComponent implements Scheduler, Closeable {
|
||||
|
||||
public static class Names {
|
||||
public static final String SAME = "same";
|
||||
|
@ -143,8 +140,6 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
|
||||
private Map<String, ExecutorHolder> executors = new HashMap<>();
|
||||
|
||||
private final ScheduledThreadPoolExecutor scheduler;
|
||||
|
||||
private final CachedTimeThread cachedTimeThread;
|
||||
|
||||
static final ExecutorService DIRECT_EXECUTOR = EsExecutors.newDirectExecutorService();
|
||||
|
@ -153,6 +148,8 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
|
||||
private final Map<String, ExecutorBuilder> builders;
|
||||
|
||||
private final ScheduledThreadPoolExecutor scheduler;
|
||||
|
||||
public Collection<ExecutorBuilder> builders() {
|
||||
return Collections.unmodifiableCollection(builders.values());
|
||||
}
|
||||
|
@ -210,12 +207,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
|
||||
executors.put(Names.SAME, new ExecutorHolder(DIRECT_EXECUTOR, new Info(Names.SAME, ThreadPoolType.DIRECT)));
|
||||
this.executors = unmodifiableMap(executors);
|
||||
|
||||
this.scheduler = new ScheduledThreadPoolExecutor(1, EsExecutors.daemonThreadFactory(settings, "scheduler"), new EsAbortPolicy());
|
||||
this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
|
||||
this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
|
||||
this.scheduler.setRemoveOnCancelPolicy(true);
|
||||
|
||||
this.scheduler = Scheduler.initScheduler(settings);
|
||||
TimeValue estimatedTimeInterval = ESTIMATED_TIME_INTERVAL_SETTING.get(settings);
|
||||
this.cachedTimeThread = new CachedTimeThread(EsExecutors.threadName(settings, "[timer]"), estimatedTimeInterval.millis());
|
||||
this.cachedTimeThread.start();
|
||||
|
@ -329,25 +321,6 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
return holder.executor();
|
||||
}
|
||||
|
||||
public ScheduledExecutorService scheduler() {
|
||||
return this.scheduler;
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedules a periodic action that runs on the specified thread pool.
|
||||
*
|
||||
* @param command the action to take
|
||||
* @param interval the delay interval
|
||||
* @param executor The name of the thread pool on which to execute this task. {@link Names#SAME} means "execute on the scheduler thread",
|
||||
* which there is only one of. Executing blocking or long running code on the {@link Names#SAME} thread pool should never
|
||||
* be done as it can cause issues with the cluster
|
||||
* @return a {@link Cancellable} that can be used to cancel the subsequent runs of the command. If the command is running, it will
|
||||
* not be interrupted.
|
||||
*/
|
||||
public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, String executor) {
|
||||
return new ReschedulingRunnable(command, interval, executor, this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedules a one-shot command to run after a given delay. The command is not run in the context of the calling thread. To preserve the
|
||||
* context of the calling thread you may call <code>threadPool.getThreadContext().preserveContext</code> on the runnable before passing
|
||||
|
@ -361,13 +334,30 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
* @return a ScheduledFuture who's get will return when the task is has been added to its target thread pool and throw an exception if
|
||||
* the task is canceled before it was added to its target thread pool. Once the task has been added to its target thread pool
|
||||
* the ScheduledFuture will cannot interact with it.
|
||||
* @throws EsRejectedExecutionException if the task cannot be scheduled for execution
|
||||
* @throws org.elasticsearch.common.util.concurrent.EsRejectedExecutionException if the task cannot be scheduled for execution
|
||||
*/
|
||||
public ScheduledFuture<?> schedule(TimeValue delay, String executor, Runnable command) {
|
||||
if (!Names.SAME.equals(executor)) {
|
||||
command = new ThreadedRunnable(command, executor(executor));
|
||||
}
|
||||
return scheduler.schedule(new LoggingRunnable(command), delay.millis(), TimeUnit.MILLISECONDS);
|
||||
return scheduler.schedule(new ThreadPool.LoggingRunnable(command), delay.millis(), TimeUnit.MILLISECONDS);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, String executor) {
|
||||
return new ReschedulingRunnable(command, interval, executor, this,
|
||||
(e) -> {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug((Supplier<?>) () -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]",
|
||||
command, executor), e);
|
||||
}
|
||||
},
|
||||
(e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]",
|
||||
command, executor), e));
|
||||
}
|
||||
|
||||
public Runnable preserveContext(Runnable command) {
|
||||
return getThreadContext().preserveContext(command);
|
||||
}
|
||||
|
||||
public void shutdown() {
|
||||
|
@ -376,7 +366,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
scheduler.shutdown();
|
||||
for (ExecutorHolder executor : executors.values()) {
|
||||
if (executor.executor() instanceof ThreadPoolExecutor) {
|
||||
((ThreadPoolExecutor) executor.executor()).shutdown();
|
||||
executor.executor().shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -387,7 +377,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
scheduler.shutdownNow();
|
||||
for (ExecutorHolder executor : executors.values()) {
|
||||
if (executor.executor() instanceof ThreadPoolExecutor) {
|
||||
((ThreadPoolExecutor) executor.executor()).shutdownNow();
|
||||
executor.executor().shutdownNow();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -396,14 +386,17 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
boolean result = scheduler.awaitTermination(timeout, unit);
|
||||
for (ExecutorHolder executor : executors.values()) {
|
||||
if (executor.executor() instanceof ThreadPoolExecutor) {
|
||||
result &= ((ThreadPoolExecutor) executor.executor()).awaitTermination(timeout, unit);
|
||||
result &= executor.executor().awaitTermination(timeout, unit);
|
||||
}
|
||||
}
|
||||
|
||||
cachedTimeThread.join(unit.toMillis(timeout));
|
||||
return result;
|
||||
}
|
||||
|
||||
public ScheduledExecutorService scheduler() {
|
||||
return this.scheduler;
|
||||
}
|
||||
|
||||
/**
|
||||
* Constrains a value between minimum and maximum values
|
||||
* (inclusive).
|
||||
|
@ -726,7 +719,9 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
if (pool != null) {
|
||||
try {
|
||||
pool.shutdown();
|
||||
if (awaitTermination(pool, timeout, timeUnit)) return true;
|
||||
if (awaitTermination(pool, timeout, timeUnit)) {
|
||||
return true;
|
||||
}
|
||||
// last resort
|
||||
pool.shutdownNow();
|
||||
return awaitTermination(pool, timeout, timeUnit);
|
||||
|
@ -738,11 +733,11 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
}
|
||||
|
||||
private static boolean awaitTermination(
|
||||
final ThreadPool pool,
|
||||
final ThreadPool threadPool,
|
||||
final long timeout,
|
||||
final TimeUnit timeUnit) {
|
||||
try {
|
||||
if (pool.awaitTermination(timeout, timeUnit)) {
|
||||
if (threadPool.awaitTermination(timeout, timeUnit)) {
|
||||
return true;
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
|
@ -760,102 +755,6 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
return threadContext;
|
||||
}
|
||||
|
||||
/**
|
||||
* This interface represents an object whose execution may be cancelled during runtime.
|
||||
*/
|
||||
public interface Cancellable {
|
||||
|
||||
/**
|
||||
* Cancel the execution of this object. This method is idempotent.
|
||||
*/
|
||||
void cancel();
|
||||
|
||||
/**
|
||||
* Check if the execution has been cancelled
|
||||
* @return true if cancelled
|
||||
*/
|
||||
boolean isCancelled();
|
||||
}
|
||||
|
||||
/**
|
||||
* This class encapsulates the scheduling of a {@link Runnable} that needs to be repeated on a interval. For example, checking a value
|
||||
* for cleanup every second could be done by passing in a Runnable that can perform the check and the specified interval between
|
||||
* executions of this runnable. <em>NOTE:</em> the runnable is only rescheduled to run again after completion of the runnable.
|
||||
*
|
||||
* For this class, <i>completion</i> means that the call to {@link Runnable#run()} returned or an exception was thrown and caught. In
|
||||
* case of an exception, this class will log the exception and reschedule the runnable for its next execution. This differs from the
|
||||
* {@link ScheduledThreadPoolExecutor#scheduleWithFixedDelay(Runnable, long, long, TimeUnit)} semantics as an exception there would
|
||||
* terminate the rescheduling of the runnable.
|
||||
*/
|
||||
static final class ReschedulingRunnable extends AbstractRunnable implements Cancellable {
|
||||
|
||||
private final Runnable runnable;
|
||||
private final TimeValue interval;
|
||||
private final String executor;
|
||||
private final ThreadPool threadPool;
|
||||
|
||||
private volatile boolean run = true;
|
||||
|
||||
/**
|
||||
* Creates a new rescheduling runnable and schedules the first execution to occur after the interval specified
|
||||
*
|
||||
* @param runnable the {@link Runnable} that should be executed periodically
|
||||
* @param interval the time interval between executions
|
||||
* @param executor the executor where this runnable should be scheduled to run
|
||||
* @param threadPool the {@link ThreadPool} instance to use for scheduling
|
||||
*/
|
||||
ReschedulingRunnable(Runnable runnable, TimeValue interval, String executor, ThreadPool threadPool) {
|
||||
this.runnable = runnable;
|
||||
this.interval = interval;
|
||||
this.executor = executor;
|
||||
this.threadPool = threadPool;
|
||||
threadPool.schedule(interval, executor, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void cancel() {
|
||||
run = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCancelled() {
|
||||
return run == false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void doRun() {
|
||||
// always check run here since this may have been cancelled since the last execution and we do not want to run
|
||||
if (run) {
|
||||
runnable.run();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
threadPool.logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", runnable.toString(), executor), e);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRejection(Exception e) {
|
||||
run = false;
|
||||
if (threadPool.logger.isDebugEnabled()) {
|
||||
threadPool.logger.debug((Supplier<?>) () -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", runnable, executor), e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onAfter() {
|
||||
// if this has not been cancelled reschedule it to run again
|
||||
if (run) {
|
||||
try {
|
||||
threadPool.schedule(interval, executor, this);
|
||||
} catch (final EsRejectedExecutionException e) {
|
||||
onRejection(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static boolean assertNotScheduleThread(String reason) {
|
||||
assert Thread.currentThread().getName().contains("scheduler") == false :
|
||||
"Expected current thread [" + Thread.currentThread() + "] to not be the scheduler thread. Reason: [" + reason + "]";
|
||||
|
|
|
@@ -25,7 +25,7 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPool.Cancellable;
import org.elasticsearch.threadpool.Scheduler.Cancellable;
import org.elasticsearch.threadpool.ThreadPool.Names;

import java.io.IOException;
@ -54,6 +54,8 @@ import java.util.HashSet;
|
|||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.hamcrest.core.IsEqual.equalTo;
|
||||
|
||||
public class CollapsingTopDocsCollectorTests extends ESTestCase {
|
||||
private static class SegmentSearcher extends IndexSearcher {
|
||||
private final List<LeafReaderContext> ctx;
|
||||
|
@ -82,12 +84,15 @@ public class CollapsingTopDocsCollectorTests extends ESTestCase {
|
|||
}
|
||||
|
||||
<T extends Comparable> void assertSearchCollapse(CollapsingDocValuesProducer<T> dvProducers, boolean numeric) throws IOException {
|
||||
assertSearchCollapse(dvProducers, numeric, true);
|
||||
assertSearchCollapse(dvProducers, numeric, false);
|
||||
assertSearchCollapse(dvProducers, numeric, true, true);
|
||||
assertSearchCollapse(dvProducers, numeric, true, false);
|
||||
assertSearchCollapse(dvProducers, numeric, false, true);
|
||||
assertSearchCollapse(dvProducers, numeric, false, false);
|
||||
}
|
||||
|
||||
private <T extends Comparable> void assertSearchCollapse(CollapsingDocValuesProducer<T> dvProducers,
|
||||
boolean numeric, boolean multivalued) throws IOException {
|
||||
boolean numeric, boolean multivalued,
|
||||
boolean trackMaxScores) throws IOException {
|
||||
final int numDocs = randomIntBetween(1000, 2000);
|
||||
int maxGroup = randomIntBetween(2, 500);
|
||||
final Directory dir = newDirectory();
|
||||
|
@@ -118,14 +123,14 @@ public class CollapsingTopDocsCollectorTests extends ESTestCase {
        final CollapsingTopDocsCollector collapsingCollector;
        if (numeric) {
            collapsingCollector =
                CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups, false);
                CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups, trackMaxScores);
        } else {
            collapsingCollector =
                CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups, false);
                CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups, trackMaxScores);
        }

        TopFieldCollector topFieldCollector =
            TopFieldCollector.create(sort, totalHits, true, false, false);
            TopFieldCollector.create(sort, totalHits, true, trackMaxScores, trackMaxScores);

        searcher.search(new MatchAllDocsQuery(), collapsingCollector);
        searcher.search(new MatchAllDocsQuery(), topFieldCollector);
@@ -136,6 +141,11 @@ public class CollapsingTopDocsCollectorTests extends ESTestCase {
        assertEquals(totalHits, collapseTopFieldDocs.totalHits);
        assertEquals(totalHits, topDocs.scoreDocs.length);
        assertEquals(totalHits, topDocs.totalHits);
        if (trackMaxScores) {
            assertThat(collapseTopFieldDocs.getMaxScore(), equalTo(topDocs.getMaxScore()));
        } else {
            assertThat(collapseTopFieldDocs.getMaxScore(), equalTo(Float.NaN));
        }

        Set<Object> seen = new HashSet<>();
        // collapse field is the last sort
@@ -186,14 +196,14 @@ public class CollapsingTopDocsCollectorTests extends ESTestCase {
        }

        final CollapseTopFieldDocs[] shardHits = new CollapseTopFieldDocs[subSearchers.length];
        final Weight weight = searcher.createNormalizedWeight(new MatchAllDocsQuery(), false);
        final Weight weight = searcher.createNormalizedWeight(new MatchAllDocsQuery(), true);
        for (int shardIDX = 0; shardIDX < subSearchers.length; shardIDX++) {
            final SegmentSearcher subSearcher = subSearchers[shardIDX];
            final CollapsingTopDocsCollector c;
            if (numeric) {
                c = CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups, false);
                c = CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups, trackMaxScores);
            } else {
                c = CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups, false);
                c = CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups, trackMaxScores);
            }
            subSearcher.search(weight, c);
            shardHits[shardIDX] = c.getTopDocs();

@@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.discovery.zen.PendingClusterStateStats;
import org.elasticsearch.discovery.zen.PublishClusterStateStats;
import org.elasticsearch.http.HttpStats;
import org.elasticsearch.indices.breaker.AllCircuitBreakerStats;
import org.elasticsearch.indices.breaker.CircuitBreakerStats;
@@ -32,6 +33,8 @@ import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.jvm.JvmStats;
import org.elasticsearch.monitor.os.OsStats;
import org.elasticsearch.monitor.process.ProcessStats;
import org.elasticsearch.node.AdaptiveSelectionStats;
import org.elasticsearch.node.ResponseCollectorService;
import org.elasticsearch.script.ScriptStats;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
@@ -46,6 +49,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;

@@ -278,6 +282,22 @@ public class NodeStatsTests extends ESTestCase {
                    assertEquals(stats.getIngestCount(), deserializedStats.getIngestCount());
                }
            }
            AdaptiveSelectionStats adaptiveStats = nodeStats.getAdaptiveSelectionStats();
            AdaptiveSelectionStats deserializedAdaptiveStats = deserializedNodeStats.getAdaptiveSelectionStats();
            if (adaptiveStats == null) {
                assertNull(deserializedAdaptiveStats);
            } else {
                assertEquals(adaptiveStats.getOutgoingConnections(), deserializedAdaptiveStats.getOutgoingConnections());
                assertEquals(adaptiveStats.getRanks(), deserializedAdaptiveStats.getRanks());
                adaptiveStats.getComputedStats().forEach((k, v) -> {
                    ResponseCollectorService.ComputedNodeStats aStats = adaptiveStats.getComputedStats().get(k);
                    ResponseCollectorService.ComputedNodeStats bStats = deserializedAdaptiveStats.getComputedStats().get(k);
                    assertEquals(aStats.nodeId, bStats.nodeId);
                    assertEquals(aStats.queueSize, bStats.queueSize, 0.01);
                    assertEquals(aStats.serviceTime, bStats.serviceTime, 0.01);
                    assertEquals(aStats.responseTime, bStats.responseTime, 0.01);
                });
            }
        }
    }
}

@@ -392,8 +412,18 @@ public class NodeStatsTests extends ESTestCase {
        }
        ScriptStats scriptStats = frequently() ?
            new ScriptStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) : null;
        DiscoveryStats discoveryStats = frequently() ? new DiscoveryStats(randomBoolean() ? new PendingClusterStateStats(randomInt(),
            randomInt(), randomInt()) : null) : null;
        DiscoveryStats discoveryStats = frequently()
            ? new DiscoveryStats(
                randomBoolean()
                    ? new PendingClusterStateStats(randomInt(), randomInt(), randomInt())
                    : null,
                randomBoolean()
                    ? new PublishClusterStateStats(
                        randomNonNegativeLong(),
                        randomNonNegativeLong(),
                        randomNonNegativeLong())
                    : null)
            : null;
        IngestStats ingestStats = null;
        if (frequently()) {
            IngestStats.Stats totalStats = new IngestStats.Stats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(),

@@ -407,8 +437,31 @@ public class NodeStatsTests extends ESTestCase {
            }
            ingestStats = new IngestStats(totalStats, statsPerPipeline);
        }
        AdaptiveSelectionStats adaptiveSelectionStats = null;
        if (frequently()) {
            int numNodes = randomIntBetween(0,10);
            Map<String, Long> nodeConnections = new HashMap<>();
            Map<String, ResponseCollectorService.ComputedNodeStats> nodeStats = new HashMap<>();
            for (int i = 0; i < numNodes; i++) {
                String nodeId = randomAlphaOfLengthBetween(3, 10);
                // add outgoing connection info
                if (frequently()) {
                    nodeConnections.put(nodeId, randomLongBetween(0, 100));
                }
                // add node calculations
                if (frequently()) {
                    ResponseCollectorService.ComputedNodeStats stats = new ResponseCollectorService.ComputedNodeStats(nodeId,
                        randomIntBetween(1,10), randomIntBetween(0, 2000),
                        randomDoubleBetween(1.0, 10000000.0, true),
                        randomDoubleBetween(1.0, 10000000.0, true));
                    nodeStats.put(nodeId, stats);
                }
            }
            adaptiveSelectionStats = new AdaptiveSelectionStats(nodeConnections, nodeStats);
        }
        //TODO NodeIndicesStats are not tested here, way too complicated to create, also they need to be migrated to Writeable yet
        return new NodeStats(node, randomNonNegativeLong(), null, osStats, processStats, jvmStats, threadPoolStats, fsInfo,
            transportStats, httpStats, allCircuitBreakerStats, scriptStats, discoveryStats, ingestStats);
        return new NodeStats(node, randomNonNegativeLong(), null, osStats, processStats, jvmStats, threadPoolStats,
            fsInfo, transportStats, httpStats, allCircuitBreakerStats, scriptStats, discoveryStats,
            ingestStats, adaptiveSelectionStats);
    }
}

@@ -23,10 +23,15 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;

import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class SettingsUpdaterTests extends ESTestCase {

@@ -132,4 +137,30 @@ public class SettingsUpdaterTests extends ESTestCase {
        assertEquals(clusterState.blocks().global().size(), 0);

    }

    public void testDeprecationLogging() {
        Setting<String> deprecatedSetting =
            Setting.simpleString("deprecated.setting", Property.Dynamic, Property.NodeScope, Property.Deprecated);
        final Settings settings = Settings.builder().put("deprecated.setting", "foo").build();
        final Set<Setting<?>> settingsSet =
            Stream.concat(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), Stream.of(deprecatedSetting)).collect(Collectors.toSet());
        final ClusterSettings clusterSettings = new ClusterSettings(settings, settingsSet);
        clusterSettings.addSettingsUpdateConsumer(deprecatedSetting, s -> {});
        final SettingsUpdater settingsUpdater = new SettingsUpdater(clusterSettings);
        final ClusterState clusterState =
            ClusterState.builder(new ClusterName("foo")).metaData(MetaData.builder().persistentSettings(settings).build()).build();

        final Settings toApplyDebug = Settings.builder().put("logger.org.elasticsearch", "debug").build();
        final ClusterState afterDebug = settingsUpdater.updateSettings(clusterState, toApplyDebug, Settings.EMPTY);
        assertSettingDeprecationsAndWarnings(new Setting<?>[] { deprecatedSetting });

        final Settings toApplyUnset = Settings.builder().putNull("logger.org.elasticsearch").build();
        final ClusterState afterUnset = settingsUpdater.updateSettings(afterDebug, toApplyUnset, Settings.EMPTY);
        assertSettingDeprecationsAndWarnings(new Setting<?>[] { deprecatedSetting });

        // we also check that if no settings are changed, deprecation logging still occurs
        settingsUpdater.updateSettings(afterUnset, toApplyUnset, Settings.EMPTY);
        assertSettingDeprecationsAndWarnings(new Setting<?>[] { deprecatedSetting });
    }

}

@ -20,12 +20,19 @@
|
|||
package org.elasticsearch.action.admin.indices.create;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
|
||||
|
||||
public class CreateIndexResponseTests extends ESTestCase {
|
||||
|
||||
public void testSerialization() throws IOException {
|
||||
|
@ -62,4 +69,59 @@ public class CreateIndexResponseTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testToXContent() {
|
||||
CreateIndexResponse response = new CreateIndexResponse(true, false, "index_name");
|
||||
String output = Strings.toString(response);
|
||||
assertEquals("{\"acknowledged\":true,\"shards_acknowledged\":false,\"index\":\"index_name\"}", output);
|
||||
}
|
||||
|
||||
public void testToAndFromXContent() throws IOException {
|
||||
doFromXContentTestWithRandomFields(false);
|
||||
}
|
||||
|
||||
/**
|
||||
* This test adds random fields and objects to the xContent rendered out to
|
||||
* ensure we can parse it back to be forward compatible with additions to
|
||||
* the xContent
|
||||
*/
|
||||
public void testFromXContentWithRandomFields() throws IOException {
|
||||
doFromXContentTestWithRandomFields(true);
|
||||
}
|
||||
|
||||
private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException {
|
||||
|
||||
final CreateIndexResponse createIndexResponse = createTestItem();
|
||||
|
||||
boolean humanReadable = randomBoolean();
|
||||
final XContentType xContentType = randomFrom(XContentType.values());
|
||||
BytesReference originalBytes = toShuffledXContent(createIndexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
|
||||
|
||||
BytesReference mutated;
|
||||
if (addRandomFields) {
|
||||
mutated = insertRandomFields(xContentType, originalBytes, null, random());
|
||||
} else {
|
||||
mutated = originalBytes;
|
||||
}
|
||||
CreateIndexResponse parsedCreateIndexResponse;
|
||||
try (XContentParser parser = createParser(xContentType.xContent(), mutated)) {
|
||||
parsedCreateIndexResponse = CreateIndexResponse.fromXContent(parser);
|
||||
assertNull(parser.nextToken());
|
||||
}
|
||||
|
||||
assertEquals(createIndexResponse.index(), parsedCreateIndexResponse.index());
|
||||
assertEquals(createIndexResponse.isShardsAcked(), parsedCreateIndexResponse.isShardsAcked());
|
||||
assertEquals(createIndexResponse.isAcknowledged(), parsedCreateIndexResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a random {@link CreateIndexResponse}.
|
||||
*/
|
||||
private static CreateIndexResponse createTestItem() throws IOException {
|
||||
boolean acknowledged = randomBoolean();
|
||||
boolean shardsAcked = acknowledged && randomBoolean();
|
||||
String index = randomAlphaOfLength(5);
|
||||
|
||||
return new CreateIndexResponse(acknowledged, shardsAcked, index);
|
||||
}
|
||||
}
@ -0,0 +1,85 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.delete;
|
||||
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
|
||||
|
||||
public class DeleteIndexResponseTests extends ESTestCase {
|
||||
|
||||
public void testToXContent() {
|
||||
DeleteIndexResponse response = new DeleteIndexResponse(true);
|
||||
String output = Strings.toString(response);
|
||||
assertEquals("{\"acknowledged\":true}", output);
|
||||
}
|
||||
|
||||
public void testToAndFromXContent() throws IOException {
|
||||
doFromXContentTestWithRandomFields(false);
|
||||
}
|
||||
|
||||
/**
|
||||
* This test adds random fields and objects to the xContent rendered out to
|
||||
* ensure we can parse it back to be forward compatible with additions to
|
||||
* the xContent
|
||||
*/
|
||||
public void testFromXContentWithRandomFields() throws IOException {
|
||||
doFromXContentTestWithRandomFields(true);
|
||||
}
|
||||
|
||||
private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException {
|
||||
|
||||
final DeleteIndexResponse deleteIndexResponse = createTestItem();
|
||||
|
||||
boolean humanReadable = randomBoolean();
|
||||
final XContentType xContentType = randomFrom(XContentType.values());
|
||||
BytesReference originalBytes = toShuffledXContent(deleteIndexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
|
||||
|
||||
BytesReference mutated;
|
||||
if (addRandomFields) {
|
||||
mutated = insertRandomFields(xContentType, originalBytes, null, random());
|
||||
} else {
|
||||
mutated = originalBytes;
|
||||
}
|
||||
DeleteIndexResponse parsedDeleteIndexResponse;
|
||||
try (XContentParser parser = createParser(xContentType.xContent(), mutated)) {
|
||||
parsedDeleteIndexResponse = DeleteIndexResponse.fromXContent(parser);
|
||||
assertNull(parser.nextToken());
|
||||
}
|
||||
|
||||
assertEquals(deleteIndexResponse.isAcknowledged(), parsedDeleteIndexResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a random {@link DeleteIndexResponse}.
|
||||
*/
|
||||
private static DeleteIndexResponse createTestItem() throws IOException {
|
||||
boolean acknowledged = randomBoolean();
|
||||
|
||||
return new DeleteIndexResponse(acknowledged);
|
||||
}
|
||||
}
@@ -82,12 +82,12 @@ public class TransportRolloverActionTests extends ESTestCase {
            .settings(settings)
            .build();
        final HashSet<Condition> conditions = Sets.newHashSet(maxDocsCondition, maxAgeCondition);
        Set<Condition.Result> results = evaluateConditions(conditions, new DocsStats(matchMaxDocs, 0L), metaData);
        Set<Condition.Result> results = evaluateConditions(conditions, new DocsStats(matchMaxDocs, 0L, between(1, 10000)), metaData);
        assertThat(results.size(), equalTo(2));
        for (Condition.Result result : results) {
            assertThat(result.matched, equalTo(true));
        }
        results = evaluateConditions(conditions, new DocsStats(notMatchMaxDocs, 0), metaData);
        results = evaluateConditions(conditions, new DocsStats(notMatchMaxDocs, 0, between(1, 10000)), metaData);
        assertThat(results.size(), equalTo(2));
        for (Condition.Result result : results) {
            if (result.condition instanceof MaxAgeCondition) {
@@ -213,10 +213,10 @@ public class TransportRolloverActionTests extends ESTestCase {

    private IndicesStatsResponse createIndecesStatResponse(long totalDocs, long primaryDocs) {
        final CommonStats primaryStats = mock(CommonStats.class);
        when(primaryStats.getDocs()).thenReturn(new DocsStats(primaryDocs, 0));
        when(primaryStats.getDocs()).thenReturn(new DocsStats(primaryDocs, 0, between(1, 10000)));

        final CommonStats totalStats = mock(CommonStats.class);
        when(totalStats.getDocs()).thenReturn(new DocsStats(totalDocs, 0));
        when(totalStats.getDocs()).thenReturn(new DocsStats(totalDocs, 0, between(1, 10000)));

        final IndicesStatsResponse response = mock(IndicesStatsResponse.class);
        when(response.getPrimaries()).thenReturn(primaryStats);

@ -73,7 +73,7 @@ public class TransportShrinkActionTests extends ESTestCase {
|
|||
assertTrue(
|
||||
expectThrows(IllegalStateException.class, () ->
|
||||
TransportShrinkAction.prepareCreateIndexRequest(new ShrinkRequest("target", "source"), state,
|
||||
(i) -> new DocsStats(Integer.MAX_VALUE, randomIntBetween(1, 1000)), new IndexNameExpressionResolver(Settings.EMPTY))
|
||||
(i) -> new DocsStats(Integer.MAX_VALUE, between(1, 1000), between(1, 100)), new IndexNameExpressionResolver(Settings.EMPTY))
|
||||
).getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards "));
|
||||
|
||||
|
||||
|
@ -84,7 +84,7 @@ public class TransportShrinkActionTests extends ESTestCase {
|
|||
ClusterState clusterState = createClusterState("source", 8, 1,
|
||||
Settings.builder().put("index.blocks.write", true).build());
|
||||
TransportShrinkAction.prepareCreateIndexRequest(req, clusterState,
|
||||
(i) -> i == 2 || i == 3 ? new DocsStats(Integer.MAX_VALUE/2, randomIntBetween(1, 1000)) : null,
|
||||
(i) -> i == 2 || i == 3 ? new DocsStats(Integer.MAX_VALUE / 2, between(1, 1000), between(1, 10000)) : null,
|
||||
new IndexNameExpressionResolver(Settings.EMPTY));
|
||||
}
|
||||
).getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards "));
|
||||
|
@ -106,7 +106,7 @@ public class TransportShrinkActionTests extends ESTestCase {
|
|||
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
|
||||
|
||||
TransportShrinkAction.prepareCreateIndexRequest(new ShrinkRequest("target", "source"), clusterState,
|
||||
(i) -> new DocsStats(randomIntBetween(1, 1000), randomIntBetween(1, 1000)), new IndexNameExpressionResolver(Settings.EMPTY));
|
||||
(i) -> new DocsStats(between(1, 1000), between(1, 1000), between(0, 10000)), new IndexNameExpressionResolver(Settings.EMPTY));
|
||||
}
|
||||
|
||||
public void testShrinkIndexSettings() {
|
||||
|
@ -128,7 +128,7 @@ public class TransportShrinkActionTests extends ESTestCase {
|
|||
routingTable.index(indexName).shardsWithState(ShardRoutingState.INITIALIZING)).routingTable();
|
||||
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
|
||||
int numSourceShards = clusterState.metaData().index(indexName).getNumberOfShards();
|
||||
DocsStats stats = new DocsStats(randomIntBetween(0, (IndexWriter.MAX_DOCS) / numSourceShards), randomIntBetween(1, 1000));
|
||||
DocsStats stats = new DocsStats(between(0, (IndexWriter.MAX_DOCS) / numSourceShards), between(1, 1000), between(1, 10000));
|
||||
ShrinkRequest target = new ShrinkRequest("target", indexName);
|
||||
final ActiveShardCount activeShardCount = randomBoolean() ? ActiveShardCount.ALL : ActiveShardCount.ONE;
|
||||
target.setWaitForActiveShards(activeShardCount);
@ -19,6 +19,7 @@
|
|||
package org.elasticsearch.action.admin.indices.template.put;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -32,6 +33,11 @@ import java.util.Arrays;
|
|||
import java.util.Base64;
|
||||
import java.util.Collections;
|
||||
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
import static org.hamcrest.core.Is.is;
|
||||
|
||||
public class PutIndexTemplateRequestTests extends ESTestCase {
|
||||
|
||||
// bwc for #21009
|
||||
|
@ -107,4 +113,21 @@ public class PutIndexTemplateRequestTests extends ESTestCase {
|
|||
assertEquals("template", request.patterns().get(0));
|
||||
}
|
||||
}
|
||||
|
||||
public void testValidateErrorMessage() throws Exception {
|
||||
PutIndexTemplateRequest request = new PutIndexTemplateRequest();
|
||||
ActionRequestValidationException withoutNameAndPattern = request.validate();
|
||||
assertThat(withoutNameAndPattern.getMessage(), containsString("name is missing"));
|
||||
assertThat(withoutNameAndPattern.getMessage(), containsString("index patterns are missing"));
|
||||
|
||||
request.name("foo");
|
||||
ActionRequestValidationException withoutIndexPatterns = request.validate();
|
||||
assertThat(withoutIndexPatterns.validationErrors(), hasSize(1));
|
||||
assertThat(withoutIndexPatterns.getMessage(), containsString("index patterns are missing"));
|
||||
|
||||
request.patterns(Collections.singletonList("test-*"));
|
||||
ActionRequestValidationException noError = request.validate();
|
||||
assertThat(noError, is(nullValue()));
|
||||
}
|
||||
|
||||
}
@ -67,7 +67,7 @@ public class BulkProcessorTests extends ESTestCase {
|
|||
final BulkProcessor bulkProcessor;
|
||||
assertNull(threadPool.getThreadContext().getHeader(headerKey));
|
||||
assertNull(threadPool.getThreadContext().getTransient(transientKey));
|
||||
try (ThreadContext.StoredContext ctx = threadPool.getThreadContext().stashContext()) {
|
||||
try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) {
|
||||
threadPool.getThreadContext().putHeader(headerKey, headerValue);
|
||||
threadPool.getThreadContext().putTransient(transientKey, transientValue);
|
||||
bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), new BulkProcessor.Listener() {
|
||||
|
@ -82,7 +82,7 @@ public class BulkProcessorTests extends ESTestCase {
|
|||
@Override
|
||||
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
|
||||
}
|
||||
}, 1, bulkSize, new ByteSizeValue(5, ByteSizeUnit.MB), flushInterval, threadPool);
|
||||
}, 1, bulkSize, new ByteSizeValue(5, ByteSizeUnit.MB), flushInterval, threadPool, () -> {});
|
||||
}
|
||||
assertNull(threadPool.getThreadContext().getHeader(headerKey));
|
||||
assertNull(threadPool.getThreadContext().getTransient(transientKey));
@ -24,9 +24,12 @@ import org.elasticsearch.action.OriginalIndices;
|
|||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.internal.AliasFilter;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
@ -38,11 +41,12 @@ import java.util.Collections;
|
|||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
|
||||
|
||||
|
||||
public void testFilterShards() throws InterruptedException {
|
||||
|
||||
final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, System.nanoTime(),
|
||||
|
@ -185,6 +189,7 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
|
|||
lookup.put("node1", new SearchAsyncActionTests.MockConnection(primaryNode));
|
||||
lookup.put("node2", new SearchAsyncActionTests.MockConnection(replicaNode));
|
||||
|
||||
|
||||
final SearchTransportService searchTransportService =
|
||||
new SearchTransportService(Settings.builder().put("search.remote.connect", false).build(), null, null) {
|
||||
@Override
|
||||
|
@ -197,11 +202,11 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
|
|||
}
|
||||
};
|
||||
|
||||
final AtomicReference<GroupShardsIterator<SearchShardIterator>> result = new AtomicReference<>();
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
final OriginalIndices originalIndices = new OriginalIndices(new String[]{"idx"}, IndicesOptions.strictExpandOpenAndForbidClosed());
|
||||
final GroupShardsIterator<SearchShardIterator> shardsIter =
|
||||
SearchAsyncActionTests.getShardsIter("idx", originalIndices, 2048, randomBoolean(), primaryNode, replicaNode);
|
||||
SearchAsyncActionTests.getShardsIter("idx", originalIndices, 4096, randomBoolean(), primaryNode, replicaNode);
|
||||
final ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, Runtime.getRuntime().availableProcessors()));
|
||||
final CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(
|
||||
logger,
|
||||
searchTransportService,
|
||||
|
@ -215,16 +220,38 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
|
|||
timeProvider,
|
||||
0,
|
||||
null,
|
||||
(iter) -> new SearchPhase("test") {
|
||||
(iter) -> new InitialSearchPhase<SearchPhaseResult>("test", null, iter, logger, randomIntBetween(1, 32), executor) {
|
||||
@Override
|
||||
public void run() throws IOException {
|
||||
result.set(iter);
|
||||
void onPhaseDone() {
|
||||
latch.countDown();
|
||||
}});
|
||||
}
|
||||
|
||||
@Override
|
||||
void onShardFailure(final int shardIndex, final SearchShardTarget shardTarget, final Exception ex) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
void onShardSuccess(final SearchPhaseResult result) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void executePhaseOnShard(
|
||||
final SearchShardIterator shardIt,
|
||||
final ShardRouting shard,
|
||||
final SearchActionListener<SearchPhaseResult> listener) {
|
||||
if (randomBoolean()) {
|
||||
listener.onResponse(new SearchPhaseResult() {});
|
||||
} else {
|
||||
listener.onFailure(new Exception("failure"));
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
canMatchPhase.start();
|
||||
latch.await();
|
||||
|
||||
executor.shutdown();
|
||||
}
|
||||
|
||||
}
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.common.document.DocumentField;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.text.Text;
|
||||
|
@ -242,4 +243,43 @@ public class ExpandSearchPhaseTests extends ESTestCase {
|
|||
assertNotNull(reference.get());
|
||||
assertEquals(1, mockSearchPhaseContext.phasesExecuted.get());
|
||||
}
|
||||
|
||||
public void testExpandRequestOptions() throws IOException {
|
||||
MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
|
||||
mockSearchPhaseContext.searchTransport = new SearchTransportService(
|
||||
Settings.builder().put("search.remote.connect", false).build(), null, null) {
|
||||
|
||||
@Override
|
||||
void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener<MultiSearchResponse> listener) {
|
||||
final QueryBuilder postFilter = QueryBuilders.existsQuery("foo");
|
||||
assertTrue(request.requests().stream().allMatch((r) -> "foo".equals(r.preference())));
|
||||
assertTrue(request.requests().stream().allMatch((r) -> "baz".equals(r.routing())));
|
||||
assertTrue(request.requests().stream().allMatch((r) -> postFilter.equals(r.source().postFilter())));
|
||||
}
|
||||
};
|
||||
mockSearchPhaseContext.getRequest().source(new SearchSourceBuilder()
|
||||
.collapse(
|
||||
new CollapseBuilder("someField")
|
||||
.setInnerHits(new InnerHitBuilder().setName("foobarbaz"))
|
||||
)
|
||||
.postFilter(QueryBuilders.existsQuery("foo")))
|
||||
.preference("foobar")
|
||||
.routing("baz");
|
||||
|
||||
SearchHits hits = new SearchHits(new SearchHit[0], 1, 1.0f);
|
||||
InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1);
|
||||
AtomicReference<SearchResponse> reference = new AtomicReference<>();
|
||||
ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, r ->
|
||||
new SearchPhase("test") {
|
||||
@Override
|
||||
public void run() throws IOException {
|
||||
reference.set(mockSearchPhaseContext.buildSearchResponse(r, null));
|
||||
}
|
||||
}
|
||||
);
|
||||
phase.run();
|
||||
mockSearchPhaseContext.assertNoFailure();
|
||||
assertNotNull(reference.get());
|
||||
assertEquals(1, mockSearchPhaseContext.phasesExecuted.get());
|
||||
}
|
||||
}
@ -50,6 +50,8 @@ import java.util.Map;
|
|||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
|
@ -285,6 +287,7 @@ public class SearchAsyncActionTests extends ESTestCase {
|
|||
lookup.put(primaryNode.getId(), new MockConnection(primaryNode));
|
||||
lookup.put(replicaNode.getId(), new MockConnection(replicaNode));
|
||||
Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY));
|
||||
final ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, Runtime.getRuntime().availableProcessors()));
|
||||
AbstractSearchAsyncAction asyncAction =
|
||||
new AbstractSearchAsyncAction<TestSearchPhaseResult>(
|
||||
"test",
|
||||
|
@ -295,7 +298,7 @@ public class SearchAsyncActionTests extends ESTestCase {
|
|||
return lookup.get(node); },
|
||||
aliasFilters,
|
||||
Collections.emptyMap(),
|
||||
null,
|
||||
executor,
|
||||
request,
|
||||
responseListener,
|
||||
shardsIter,
|
||||
|
@ -349,6 +352,7 @@ public class SearchAsyncActionTests extends ESTestCase {
|
|||
} else {
|
||||
assertTrue(nodeToContextMap.get(replicaNode).toString(), nodeToContextMap.get(replicaNode).isEmpty());
|
||||
}
|
||||
executor.shutdown();
|
||||
}
|
||||
|
||||
static GroupShardsIterator<SearchShardIterator> getShardsIter(String index, OriginalIndices originalIndices, int numShards,
@ -175,7 +175,7 @@ public class SearchResponseTests extends ESTestCase {
|
|||
ShardSearchFailure parsedFailure = parsed.getShardFailures()[i];
|
||||
ShardSearchFailure originalFailure = failures[i];
|
||||
assertEquals(originalFailure.index(), parsedFailure.index());
|
||||
assertEquals(originalFailure.shard().getNodeId(), parsedFailure.shard().getNodeId());
|
||||
assertEquals(originalFailure.shard(), parsedFailure.shard());
|
||||
assertEquals(originalFailure.shardId(), parsedFailure.shardId());
|
||||
String originalMsg = originalFailure.getCause().getMessage();
|
||||
assertEquals(parsedFailure.getCause().getMessage(), "Elasticsearch exception [type=parsing_exception, reason=" +
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.elasticsearch.action.OriginalIndices;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
|
@ -40,12 +41,14 @@ public class ShardSearchFailureTests extends ESTestCase {
|
|||
public static ShardSearchFailure createTestItem() {
|
||||
String randomMessage = randomAlphaOfLengthBetween(3, 20);
|
||||
Exception ex = new ParsingException(0, 0, randomMessage , new IllegalArgumentException("some bad argument"));
|
||||
String nodeId = randomAlphaOfLengthBetween(5, 10);
|
||||
String indexName = randomAlphaOfLengthBetween(5, 10);
|
||||
String indexUuid = randomAlphaOfLengthBetween(5, 10);
|
||||
int shardId = randomInt();
|
||||
return new ShardSearchFailure(ex,
|
||||
new SearchShardTarget(nodeId, new ShardId(new Index(indexName, indexUuid), shardId), null, null));
|
||||
SearchShardTarget searchShardTarget = null;
|
||||
if (randomBoolean()) {
|
||||
String nodeId = randomAlphaOfLengthBetween(5, 10);
|
||||
String indexName = randomAlphaOfLengthBetween(5, 10);
|
||||
searchShardTarget = new SearchShardTarget(nodeId,
|
||||
new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), randomInt()), null, null);
|
||||
}
|
||||
return new ShardSearchFailure(ex, searchShardTarget);
|
||||
}
|
||||
|
||||
public void testFromXContent() throws IOException {
|
||||
|
@ -80,10 +83,10 @@ public class ShardSearchFailureTests extends ESTestCase {
|
|||
assertNull(parser.nextToken());
|
||||
}
|
||||
assertEquals(response.index(), parsed.index());
|
||||
assertEquals(response.shard().getNodeId(), parsed.shard().getNodeId());
|
||||
assertEquals(response.shard(), parsed.shard());
|
||||
assertEquals(response.shardId(), parsed.shardId());
|
||||
|
||||
/**
|
||||
/*
|
||||
* we cannot compare the cause, because it will be wrapped in an outer
|
||||
* ElasticSearchException best effort: try to check that the original
|
||||
* message appears somewhere in the rendered xContent
@ -152,11 +152,11 @@ public class DiskUsageTests extends ESTestCase {
|
|||
};
|
||||
List<NodeStats> nodeStats = Arrays.asList(
|
||||
new NodeStats(new DiscoveryNode("node_1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0,
|
||||
null,null,null,null,null,new FsInfo(0, null, node1FSInfo), null,null,null,null,null, null),
|
||||
null,null,null,null,null,new FsInfo(0, null, node1FSInfo), null,null,null,null,null, null, null),
|
||||
new NodeStats(new DiscoveryNode("node_2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0,
|
||||
null,null,null,null,null, new FsInfo(0, null, node2FSInfo), null,null,null,null,null, null),
|
||||
null,null,null,null,null, new FsInfo(0, null, node2FSInfo), null,null,null,null,null, null, null),
|
||||
new NodeStats(new DiscoveryNode("node_3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0,
|
||||
null,null,null,null,null, new FsInfo(0, null, node3FSInfo), null,null,null,null,null, null)
|
||||
null,null,null,null,null, new FsInfo(0, null, node3FSInfo), null,null,null,null,null, null, null)
|
||||
);
|
||||
InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvaiableUsages, newMostAvaiableUsages);
|
||||
DiskUsage leastNode_1 = newLeastAvaiableUsages.get("node_1");
|
||||
|
@ -193,11 +193,11 @@ public class DiskUsageTests extends ESTestCase {
|
|||
};
|
||||
List<NodeStats> nodeStats = Arrays.asList(
|
||||
new NodeStats(new DiscoveryNode("node_1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0,
|
||||
null,null,null,null,null,new FsInfo(0, null, node1FSInfo), null,null,null,null,null, null),
|
||||
null,null,null,null,null,new FsInfo(0, null, node1FSInfo), null,null,null,null,null, null, null),
|
||||
new NodeStats(new DiscoveryNode("node_2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0,
|
||||
null,null,null,null,null, new FsInfo(0, null, node2FSInfo), null,null,null,null,null, null),
|
||||
null,null,null,null,null, new FsInfo(0, null, node2FSInfo), null,null,null,null,null, null, null),
|
||||
new NodeStats(new DiscoveryNode("node_3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0,
|
||||
null,null,null,null,null, new FsInfo(0, null, node3FSInfo), null,null,null,null,null, null)
|
||||
null,null,null,null,null, new FsInfo(0, null, node3FSInfo), null,null,null,null,null, null, null)
|
||||
);
|
||||
InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvailableUsages, newMostAvailableUsages);
|
||||
DiskUsage leastNode_1 = newLeastAvailableUsages.get("node_1");
@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.common.io;
|
||||
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.Before;
|
||||
|
@ -34,6 +35,8 @@ import java.nio.file.Path;
|
|||
import java.nio.file.StandardOpenOption;
|
||||
import java.util.Arrays;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
/**
|
||||
* Unit tests for {@link org.elasticsearch.common.io.FileSystemUtils}.
|
||||
*/
|
||||
|
@ -137,4 +140,16 @@ public class FileSystemUtilsTests extends ESTestCase {
|
|||
assertArrayEquals(expectedBytes, actualBytes);
|
||||
}
|
||||
}
|
||||
|
||||
public void testIsDesktopServicesStoreFile() throws IOException {
|
||||
final Path path = createTempDir();
|
||||
final Path desktopServicesStore = path.resolve(".DS_Store");
|
||||
Files.createFile(desktopServicesStore);
|
||||
assertThat(FileSystemUtils.isDesktopServicesStore(desktopServicesStore), equalTo(Constants.MAC_OS_X));
|
||||
|
||||
Files.delete(desktopServicesStore);
|
||||
Files.createDirectory(desktopServicesStore);
|
||||
assertFalse(FileSystemUtils.isDesktopServicesStore(desktopServicesStore));
|
||||
}
|
||||
|
||||
}
@ -0,0 +1,117 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.util.concurrent;
|
||||
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.util.concurrent.RejectedExecutionException;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
public final class TimedRunnableTests extends ESTestCase {
|
||||
|
||||
public void testTimedRunnableDelegatesToAbstractRunnable() {
|
||||
final boolean isForceExecution = randomBoolean();
|
||||
final AtomicBoolean onAfter = new AtomicBoolean();
|
||||
final AtomicReference<Exception> onRejection = new AtomicReference<>();
|
||||
final AtomicReference<Exception> onFailure = new AtomicReference<>();
|
||||
final AtomicBoolean doRun = new AtomicBoolean();
|
||||
|
||||
final AbstractRunnable runnable = new AbstractRunnable() {
|
||||
@Override
|
||||
public boolean isForceExecution() {
|
||||
return isForceExecution;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onAfter() {
|
||||
onAfter.set(true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRejection(final Exception e) {
|
||||
onRejection.set(e);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(final Exception e) {
|
||||
onFailure.set(e);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
doRun.set(true);
|
||||
}
|
||||
};
|
||||
|
||||
final TimedRunnable timedRunnable = new TimedRunnable(runnable);
|
||||
|
||||
assertThat(timedRunnable.isForceExecution(), equalTo(isForceExecution));
|
||||
|
||||
timedRunnable.onAfter();
|
||||
assertTrue(onAfter.get());
|
||||
|
||||
final Exception rejection = new RejectedExecutionException();
|
||||
timedRunnable.onRejection(rejection);
|
||||
assertThat(onRejection.get(), equalTo(rejection));
|
||||
|
||||
final Exception failure = new Exception();
|
||||
timedRunnable.onFailure(failure);
|
||||
assertThat(onFailure.get(), equalTo(failure));
|
||||
|
||||
timedRunnable.run();
|
||||
assertTrue(doRun.get());
|
||||
}
|
||||
|
||||
public void testTimedRunnableDelegatesRunInFailureCase() {
|
||||
final AtomicBoolean onAfter = new AtomicBoolean();
|
||||
final AtomicReference<Exception> onFailure = new AtomicReference<>();
|
||||
final AtomicBoolean doRun = new AtomicBoolean();
|
||||
|
||||
final Exception exception = new Exception();
|
||||
|
||||
final AbstractRunnable runnable = new AbstractRunnable() {
|
||||
@Override
|
||||
public void onAfter() {
|
||||
onAfter.set(true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(final Exception e) {
|
||||
onFailure.set(e);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
doRun.set(true);
|
||||
throw exception;
|
||||
}
|
||||
};
|
||||
|
||||
final TimedRunnable timedRunnable = new TimedRunnable(runnable);
|
||||
timedRunnable.run();
|
||||
assertTrue(doRun.get());
|
||||
assertThat(onFailure.get(), equalTo(exception));
|
||||
assertTrue(onAfter.get());
|
||||
}
|
||||
|
||||
}
@@ -224,7 +224,7 @@ public class ConstructingObjectParserTests extends ESTestCase {
            parser.apply(createParser(JsonXContent.jsonXContent, "{}"), null);
            fail("Expected AssertionError");
        } catch (AssertionError e) {
            assertEquals("[constructor_args_required] must configure at least on constructor argument. If it doesn't have any it should "
            assertEquals("[constructor_args_required] must configure at least one constructor argument. If it doesn't have any it should "
                + "use ObjectParser instead of ConstructingObjectParser. This is a bug in the parser declaration.", e.getMessage());
        }
    }

@ -705,6 +705,73 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
private void assertPublishClusterStateStats(String description, MockNode node, long expectedFull, long expectedIncompatibleDiffs,
|
||||
long expectedCompatibleDiffs) {
|
||||
PublishClusterStateStats stats = node.action.stats();
|
||||
assertThat(description + ": full cluster states", stats.getFullClusterStateReceivedCount(), equalTo(expectedFull));
|
||||
assertThat(description + ": incompatible cluster state diffs", stats.getIncompatibleClusterStateDiffReceivedCount(),
|
||||
equalTo(expectedIncompatibleDiffs));
|
||||
assertThat(description + ": compatible cluster state diffs", stats.getCompatibleClusterStateDiffReceivedCount(),
|
||||
equalTo(expectedCompatibleDiffs));
|
||||
}
|
||||
|
||||
public void testPublishClusterStateStats() throws Exception {
|
||||
MockNode nodeA = createMockNode("nodeA").setAsMaster();
|
||||
MockNode nodeB = createMockNode("nodeB");
|
||||
|
||||
assertPublishClusterStateStats("nodeA: initial state", nodeA, 0, 0, 0);
|
||||
assertPublishClusterStateStats("nodeB: initial state", nodeB, 0, 0, 0);
|
||||
|
||||
// Initial cluster state
|
||||
ClusterState clusterState = nodeA.clusterState;
|
||||
|
||||
// cluster state update - add nodeB
|
||||
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(clusterState.nodes()).add(nodeB.discoveryNode).build();
|
||||
ClusterState previousClusterState = clusterState;
|
||||
clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
|
||||
publishStateAndWait(nodeA.action, clusterState, previousClusterState);
|
||||
|
||||
// Sent as a full cluster state update
|
||||
assertPublishClusterStateStats("nodeA: after full update", nodeA, 0, 0, 0);
|
||||
assertPublishClusterStateStats("nodeB: after full update", nodeB, 1, 0, 0);
|
||||
|
||||
// Increment cluster state version
|
||||
previousClusterState = clusterState;
|
||||
clusterState = ClusterState.builder(clusterState).incrementVersion().build();
|
||||
publishStateAndWait(nodeA.action, clusterState, previousClusterState);
|
||||
|
||||
// Sent, successfully, as a cluster state diff
|
||||
assertPublishClusterStateStats("nodeA: after successful diff update", nodeA, 0, 0, 0);
|
||||
assertPublishClusterStateStats("nodeB: after successful diff update", nodeB, 1, 0, 1);
|
||||
|
||||
// Increment cluster state version twice
|
||||
previousClusterState = ClusterState.builder(clusterState).incrementVersion().build();
|
||||
clusterState = ClusterState.builder(previousClusterState).incrementVersion().build();
|
||||
publishStateAndWait(nodeA.action, clusterState, previousClusterState);
|
||||
|
||||
// Sent, unsuccessfully, as a diff and then retried as a full update
|
||||
assertPublishClusterStateStats("nodeA: after unsuccessful diff update", nodeA, 0, 0, 0);
|
||||
assertPublishClusterStateStats("nodeB: after unsuccessful diff update", nodeB, 2, 1, 1);
|
||||
|
||||
// node A steps down from being master
|
||||
nodeA.resetMasterId();
|
||||
nodeB.resetMasterId();
|
||||
|
||||
// node B becomes the master and sends a version of the cluster state that goes back
|
||||
discoveryNodes = DiscoveryNodes.builder(discoveryNodes)
|
||||
.add(nodeA.discoveryNode)
|
||||
.add(nodeB.discoveryNode)
|
||||
.masterNodeId(nodeB.discoveryNode.getId())
|
||||
.localNodeId(nodeB.discoveryNode.getId())
|
||||
.build();
|
||||
previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
|
||||
clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
|
||||
publishStateAndWait(nodeB.action, clusterState, previousClusterState);
|
||||
|
||||
// Sent, unsuccessfully, as a diff, and then retried as a full update
|
||||
assertPublishClusterStateStats("nodeA: B became master", nodeA, 1, 1, 0);
|
||||
assertPublishClusterStateStats("nodeB: B became master", nodeB, 2, 1, 1);
|
||||
}
|
||||
|
||||
private MetaData buildMetaDataForVersion(MetaData metaData, long version) {
|
||||
ImmutableOpenMap.Builder<String, IndexMetaData> indices = ImmutableOpenMap.builder(metaData.indices());
@ -47,7 +47,6 @@ import org.elasticsearch.transport.EmptyTransportResponseHandler;
|
|||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.hamcrest.Matchers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.UnknownHostException;
|
||||
|
@ -255,6 +254,11 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
|
|||
" \"total\" : 0,\n" +
|
||||
" \"pending\" : 0,\n" +
|
||||
" \"committed\" : 0\n" +
|
||||
" },\n" +
|
||||
" \"published_cluster_states\" : {\n" +
|
||||
" \"full_states\" : 0,\n" +
|
||||
" \"incompatible_diffs\" : 0,\n" +
|
||||
" \"compatible_diffs\" : 0\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
"}";
|
||||
|
@ -275,6 +279,11 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
|
|||
assertThat(stats.getQueueStats().getCommitted(), equalTo(0));
|
||||
assertThat(stats.getQueueStats().getPending(), equalTo(0));
|
||||
|
||||
assertThat(stats.getPublishStats(), notNullValue());
|
||||
assertThat(stats.getPublishStats().getFullClusterStateReceivedCount(), equalTo(0L));
|
||||
assertThat(stats.getPublishStats().getIncompatibleClusterStateDiffReceivedCount(), equalTo(0L));
|
||||
assertThat(stats.getPublishStats().getCompatibleClusterStateDiffReceivedCount(), equalTo(0L));
|
||||
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
|
||||
builder.startObject();
|
||||
stats.toXContent(builder, ToXContent.EMPTY_PARAMS);
@ -35,7 +35,9 @@ import org.elasticsearch.test.ESSingleNodeTestCase;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
|
||||
|
@ -155,8 +157,9 @@ public class MultiFieldTests extends ESSingleNodeTestCase {
|
|||
// can to unnecessary re-syncing of the mappings between the local instance and cluster state
|
||||
public void testMultiFieldsInConsistentOrder() throws Exception {
|
||||
String[] multiFieldNames = new String[randomIntBetween(2, 10)];
|
||||
Set<String> seenFields = new HashSet<>();
|
||||
for (int i = 0; i < multiFieldNames.length; i++) {
|
||||
multiFieldNames[i] = randomAlphaOfLength(4);
|
||||
multiFieldNames[i] = randomValueOtherThanMany(s -> !seenFields.add(s), () -> randomAlphaOfLength(4));
|
||||
}
|
||||
|
||||
XContentBuilder builder = jsonBuilder().startObject().startObject("type").startObject("properties")
@ -30,6 +30,7 @@ import org.apache.lucene.search.SynonymQuery;
|
|||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
|
@ -110,7 +111,7 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase {
|
|||
Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f);
|
||||
Query actual = MultiMatchQuery.blendTerm(
|
||||
indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null),
|
||||
new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
|
||||
new BytesRef("baz"), null, 1f, false, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
|
||||
assertEquals(expected, actual);
|
||||
}
|
||||
|
||||
|
@ -126,11 +127,11 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase {
|
|||
Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f);
|
||||
Query actual = MultiMatchQuery.blendTerm(
|
||||
indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null),
|
||||
new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
|
||||
new BytesRef("baz"), null, 1f, false, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
|
||||
assertEquals(expected, actual);
|
||||
}
|
||||
|
||||
public void testBlendTermsUnsupportedValue() {
|
||||
public void testBlendTermsUnsupportedValueWithLenient() {
|
||||
FakeFieldType ft1 = new FakeFieldType();
|
||||
ft1.setName("foo");
|
||||
FakeFieldType ft2 = new FakeFieldType() {
|
||||
|
@ -142,13 +143,29 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase {
|
|||
ft2.setName("bar");
|
||||
Term[] terms = new Term[] { new Term("foo", "baz") };
|
||||
float[] boosts = new float[] {2};
|
||||
Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f);
|
||||
Query expected = new DisjunctionMaxQuery(Arrays.asList(
|
||||
Queries.newMatchNoDocsQuery("failed [" + ft2.name() + "] query, caused by illegal_argument_exception:[null]"),
|
||||
BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f)
|
||||
), 1f);
|
||||
Query actual = MultiMatchQuery.blendTerm(
|
||||
indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null),
|
||||
new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
|
||||
new BytesRef("baz"), null, 1f, true, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
|
||||
assertEquals(expected, actual);
|
||||
}
|
||||
|
||||
public void testBlendTermsUnsupportedValueWithoutLenient() {
|
||||
FakeFieldType ft = new FakeFieldType() {
|
||||
@Override
|
||||
public Query termQuery(Object value, QueryShardContext context) {
|
||||
throw new IllegalArgumentException();
|
||||
}
|
||||
};
|
||||
ft.setName("bar");
|
||||
expectThrows(IllegalArgumentException.class, () -> MultiMatchQuery.blendTerm(
|
||||
indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null),
|
||||
new BytesRef("baz"), null, 1f, false, new FieldAndFieldType(ft, 1)));
|
||||
}
|
||||
|
||||
public void testBlendNoTermQuery() {
|
||||
FakeFieldType ft1 = new FakeFieldType();
|
||||
ft1.setName("foo");
|
||||
|
@ -170,7 +187,7 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase {
|
|||
), 1.0f);
|
||||
Query actual = MultiMatchQuery.blendTerm(
|
||||
indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null),
|
||||
new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
|
||||
new BytesRef("baz"), null, 1f, false, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3));
|
||||
assertEquals(expected, actual);
|
||||
}
|
||||
|
||||
|
|
|
@@ -0,0 +1,59 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.shard;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.Matchers.equalTo;

public class DocsStatsTests extends ESTestCase {

    public void testCalculateAverageDocSize() throws Exception {
        DocsStats stats = new DocsStats(10, 2, 120);
        assertThat(stats.getAverageSizeInBytes(), equalTo(10L));

        stats.add(new DocsStats(0, 0, 0));
        assertThat(stats.getAverageSizeInBytes(), equalTo(10L));

        stats.add(new DocsStats(8, 30, 480));
        assertThat(stats.getCount(), equalTo(18L));
        assertThat(stats.getDeleted(), equalTo(32L));
        assertThat(stats.getTotalSizeInBytes(), equalTo(600L));
        assertThat(stats.getAverageSizeInBytes(), equalTo(12L));
    }

    public void testSerialize() throws Exception {
        DocsStats originalStats = new DocsStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong());
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            originalStats.writeTo(out);
            BytesReference bytes = out.bytes();
            try (StreamInput in = bytes.streamInput()) {
                DocsStats cloneStats = new DocsStats();
                cloneStats.readFrom(in);
                assertThat(cloneStats.getCount(), equalTo(originalStats.getCount()));
                assertThat(cloneStats.getDeleted(), equalTo(originalStats.getDeleted()));
                assertThat(cloneStats.getAverageSizeInBytes(), equalTo(originalStats.getAverageSizeInBytes()));
            }
        }
    }
}
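Note: the assertions above imply the average is the total size divided by all tracked documents, live plus deleted: 120 / (10 + 2) = 10 and 600 / (18 + 32) = 12. A minimal sketch of that arithmetic; the helper below is illustrative, not the DocsStats implementation:

    final class AverageDocSizeSketch {
        // Average document size as the test data suggests; guard against
        // division by zero when no documents have been tracked yet.
        static long averageSizeInBytes(long count, long deleted, long totalSizeInBytes) {
            final long docs = count + deleted;
            return docs == 0 ? 0L : totalSizeInBytes / docs;
        }
    }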
@@ -67,6 +67,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.VersionType;

@@ -88,6 +89,7 @@ import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogTests;
import org.elasticsearch.indices.IndicesQueryCache;

@@ -151,6 +153,7 @@ import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.hasToString;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

@@ -2229,6 +2232,7 @@ public class IndexShardTests extends IndexShardTestCase {
            final DocsStats docsStats = indexShard.docStats();
            assertThat(docsStats.getCount(), equalTo(numDocs));
            assertThat(docsStats.getDeleted(), equalTo(0L));
            assertThat(docsStats.getAverageSizeInBytes(), greaterThan(0L));
        }

        final List<Integer> ids = randomSubsetOf(

@@ -2265,12 +2269,70 @@ public class IndexShardTests extends IndexShardTestCase {
                final DocsStats docStats = indexShard.docStats();
                assertThat(docStats.getCount(), equalTo(numDocs));
                assertThat(docStats.getDeleted(), equalTo(0L));
                assertThat(docStats.getAverageSizeInBytes(), greaterThan(0L));
            }
        } finally {
            closeShards(indexShard);
        }
    }

    public void testEstimateTotalDocSize() throws Exception {
        IndexShard indexShard = null;
        try {
            indexShard = newStartedShard(true);

            int numDoc = randomIntBetween(100, 200);
            for (int i = 0; i < numDoc; i++) {
                String doc = XContentFactory.jsonBuilder()
                    .startObject()
                    .field("count", randomInt())
                    .field("point", randomFloat())
                    .field("description", randomUnicodeOfCodepointLength(100))
                    .endObject().string();
                indexDoc(indexShard, "doc", Integer.toString(i), doc);
            }

            assertThat("Without flushing, segment sizes should be zero",
                indexShard.docStats().getTotalSizeInBytes(), equalTo(0L));

            indexShard.flush(new FlushRequest());
            indexShard.refresh("test");
            {
                final DocsStats docsStats = indexShard.docStats();
                final StoreStats storeStats = indexShard.storeStats();
                assertThat(storeStats.sizeInBytes(), greaterThan(numDoc * 100L)); // A doc should be more than 100 bytes.

                assertThat("Estimated total document size is too small compared with the stored size",
                    docsStats.getTotalSizeInBytes(), greaterThanOrEqualTo(storeStats.sizeInBytes() * 80/100));
                assertThat("Estimated total document size is too large compared with the stored size",
                    docsStats.getTotalSizeInBytes(), lessThanOrEqualTo(storeStats.sizeInBytes() * 120/100));
            }

            // Do some updates and deletes, then recheck the correlation again.
            for (int i = 0; i < numDoc / 2; i++) {
                if (randomBoolean()) {
                    deleteDoc(indexShard, "doc", Integer.toString(i));
                } else {
                    indexDoc(indexShard, "doc", Integer.toString(i), "{\"foo\": \"bar\"}");
                }
            }

            indexShard.flush(new FlushRequest());
            indexShard.refresh("test");
            {
                final DocsStats docsStats = indexShard.docStats();
                final StoreStats storeStats = indexShard.storeStats();
                assertThat("Estimated total document size is too small compared with the stored size",
                    docsStats.getTotalSizeInBytes(), greaterThanOrEqualTo(storeStats.sizeInBytes() * 80/100));
                assertThat("Estimated total document size is too large compared with the stored size",
                    docsStats.getTotalSizeInBytes(), lessThanOrEqualTo(storeStats.sizeInBytes() * 120/100));
            }

        } finally {
            closeShards(indexShard);
        }
    }
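Note: testEstimateTotalDocSize checks that the estimated total document size tracks the on-disk store size within roughly 80% to 120%. A small helper mirroring that tolerance, written only as an illustration (the class and method names are assumptions, not part of the change):

    final class SizeToleranceSketch {
        // Integer arithmetic matches the test: storeBytes * 80/100 .. storeBytes * 120/100.
        static boolean withinTolerance(long estimatedBytes, long storeBytes) {
            return estimatedBytes >= storeBytes * 80 / 100
                && estimatedBytes <= storeBytes * 120 / 100;
        }
    }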

    /**
     * here we are simulating the scenario that happens when we do async shard fetching from GatewayService while we are finishing
     * a recovery and concurrently clean files. This should always be possible without any exception. Yet there was a bug where IndexShard
@@ -20,6 +20,7 @@ package org.elasticsearch.index.shard;

import org.apache.lucene.mockfile.FilterFileSystemProvider;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.io.PathUtilsForTesting;
import org.elasticsearch.common.settings.Settings;

@@ -33,8 +34,10 @@ import org.junit.AfterClass;
import org.junit.BeforeClass;

import java.io.IOException;
import java.math.BigInteger;
import java.nio.file.FileStore;
import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.FileAttributeView;
import java.nio.file.attribute.FileStoreAttributeView;

@@ -45,6 +48,9 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;

/** Separate test class from ShardPathTests because we need static (BeforeClass) setup to install mock filesystems... */
public class NewPathForShardTests extends ESTestCase {

@@ -158,6 +164,10 @@ public class NewPathForShardTests extends ESTestCase {
        }
    }

    static void createFakeShard(ShardPath path) throws IOException {
        Files.createDirectories(path.resolveIndex().getParent());
    }

    public void testSelectNewPathForShard() throws Exception {
        Path path = PathUtils.get(createTempDir().toString());

@@ -199,8 +209,10 @@ public class NewPathForShardTests extends ESTestCase {

        Map<Path,Integer> dataPathToShardCount = new HashMap<>();
        ShardPath result1 = ShardPath.selectNewPathForShard(nodeEnv, shardId, INDEX_SETTINGS, 100, dataPathToShardCount);
        createFakeShard(result1);
        dataPathToShardCount.put(NodeEnvironment.shardStatePathToDataPath(result1.getDataPath()), 1);
        ShardPath result2 = ShardPath.selectNewPathForShard(nodeEnv, shardId, INDEX_SETTINGS, 100, dataPathToShardCount);
        createFakeShard(result2);

        // #11122: this was the original failure: on a node with 2 disks that have nearly equal
        // free space, we would always allocate all N incoming shards to the one path that

@@ -210,4 +222,153 @@ public class NewPathForShardTests extends ESTestCase {

        nodeEnv.close();
    }

    public void testSelectNewPathForShardEvenly() throws Exception {
        Path path = PathUtils.get(createTempDir().toString());

        // Use 2 data paths:
        String[] paths = new String[] {path.resolve("a").toString(),
                                       path.resolve("b").toString()};

        Settings settings = Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), path)
            .putList(Environment.PATH_DATA_SETTING.getKey(), paths).build();
        NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings));

        // Make sure all our mocking above actually worked:
        NodePath[] nodePaths = nodeEnv.nodePaths();
        assertEquals(2, nodePaths.length);

        assertEquals("mocka", nodePaths[0].fileStore.name());
        assertEquals("mockb", nodePaths[1].fileStore.name());

        // Path a has lots of free space, but b has little, so new shard should go to a:
        aFileStore.usableSpace = 100000;
        bFileStore.usableSpace = 10000;

        ShardId shardId = new ShardId("index", "uid1", 0);
        ShardPath result = ShardPath.selectNewPathForShard(nodeEnv, shardId, INDEX_SETTINGS, 100, Collections.<Path,Integer>emptyMap());
        createFakeShard(result);
        // First shard should go to a
        assertThat(result.getDataPath().toString(), containsString(aPathPart));

        shardId = new ShardId("index", "uid1", 1);
        result = ShardPath.selectNewPathForShard(nodeEnv, shardId, INDEX_SETTINGS, 100, Collections.<Path,Integer>emptyMap());
        createFakeShard(result);
        // Second shard should go to b
        assertThat(result.getDataPath().toString(), containsString(bPathPart));

        Map<Path,Integer> dataPathToShardCount = new HashMap<>();
        shardId = new ShardId("index2", "uid2", 0);
        IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index2",
            Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3).build());
        ShardPath result1 = ShardPath.selectNewPathForShard(nodeEnv, shardId, idxSettings, 100, dataPathToShardCount);
        createFakeShard(result1);
        dataPathToShardCount.put(NodeEnvironment.shardStatePathToDataPath(result1.getDataPath()), 1);
        shardId = new ShardId("index2", "uid2", 1);
        ShardPath result2 = ShardPath.selectNewPathForShard(nodeEnv, shardId, idxSettings, 100, dataPathToShardCount);
        createFakeShard(result2);
        dataPathToShardCount.put(NodeEnvironment.shardStatePathToDataPath(result2.getDataPath()), 1);
        shardId = new ShardId("index2", "uid2", 2);
        ShardPath result3 = ShardPath.selectNewPathForShard(nodeEnv, shardId, idxSettings, 100, dataPathToShardCount);
        createFakeShard(result3);
        // 2 shards go to 'a' and 1 to 'b'
        assertThat(result1.getDataPath().toString(), containsString(aPathPart));
        assertThat(result2.getDataPath().toString(), containsString(bPathPart));
        assertThat(result3.getDataPath().toString(), containsString(aPathPart));

        nodeEnv.close();
    }

    public void testGettingPathWithMostFreeSpace() throws Exception {
        Path path = PathUtils.get(createTempDir().toString());

        // Use 2 data paths:
        String[] paths = new String[] {path.resolve("a").toString(),
                                       path.resolve("b").toString()};

        Settings settings = Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), path)
            .putList(Environment.PATH_DATA_SETTING.getKey(), paths).build();
        NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings));

        aFileStore.usableSpace = 100000;
        bFileStore.usableSpace = 1000;

        assertThat(ShardPath.getPathWithMostFreeSpace(nodeEnv), equalTo(nodeEnv.nodePaths()[0]));

        aFileStore.usableSpace = 10000;
        bFileStore.usableSpace = 20000;

        assertThat(ShardPath.getPathWithMostFreeSpace(nodeEnv), equalTo(nodeEnv.nodePaths()[1]));

        nodeEnv.close();
    }

    public void testTieBreakWithMostShards() throws Exception {
        Path path = PathUtils.get(createTempDir().toString());

        // Use 2 data paths:
        String[] paths = new String[] {path.resolve("a").toString(),
                                       path.resolve("b").toString()};

        Settings settings = Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), path)
            .putList(Environment.PATH_DATA_SETTING.getKey(), paths).build();
        NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings));

        // Make sure all our mocking above actually worked:
        NodePath[] nodePaths = nodeEnv.nodePaths();
        assertEquals(2, nodePaths.length);

        assertEquals("mocka", nodePaths[0].fileStore.name());
        assertEquals("mockb", nodePaths[1].fileStore.name());

        // Path a has lots of free space, but b has little, so new shard should go to a:
        aFileStore.usableSpace = 100000;
        bFileStore.usableSpace = 10000;

        Map<Path, Integer> dataPathToShardCount = new HashMap<>();

        ShardId shardId = new ShardId("index", "uid1", 0);
        ShardPath result = ShardPath.selectNewPathForShard(nodeEnv, shardId, INDEX_SETTINGS, 100, dataPathToShardCount);
        createFakeShard(result);
        // First shard should go to a
        assertThat(result.getDataPath().toString(), containsString(aPathPart));
        dataPathToShardCount.compute(NodeEnvironment.shardStatePathToDataPath(result.getDataPath()), (k, v) -> v == null ? 1 : v + 1);

        shardId = new ShardId("index", "uid1", 1);
        result = ShardPath.selectNewPathForShard(nodeEnv, shardId, INDEX_SETTINGS, 100, dataPathToShardCount);
        createFakeShard(result);
        // Second shard should go to b
        assertThat(result.getDataPath().toString(), containsString(bPathPart));
        dataPathToShardCount.compute(NodeEnvironment.shardStatePathToDataPath(result.getDataPath()), (k, v) -> v == null ? 1 : v + 1);

        shardId = new ShardId("index2", "uid3", 0);
        result = ShardPath.selectNewPathForShard(nodeEnv, shardId, INDEX_SETTINGS, 100, dataPathToShardCount);
        createFakeShard(result);
        // Shard for new index should go to a
        assertThat(result.getDataPath().toString(), containsString(aPathPart));
        dataPathToShardCount.compute(NodeEnvironment.shardStatePathToDataPath(result.getDataPath()), (k, v) -> v == null ? 1 : v + 1);

        shardId = new ShardId("index2", "uid2", 0);
        IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index2",
            Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3).build());
        ShardPath result1 = ShardPath.selectNewPathForShard(nodeEnv, shardId, idxSettings, 100, dataPathToShardCount);
        createFakeShard(result1);
        dataPathToShardCount.compute(NodeEnvironment.shardStatePathToDataPath(result1.getDataPath()), (k, v) -> v == null ? 1 : v + 1);
        shardId = new ShardId("index2", "uid2", 1);
        ShardPath result2 = ShardPath.selectNewPathForShard(nodeEnv, shardId, idxSettings, 100, dataPathToShardCount);
        createFakeShard(result2);
        dataPathToShardCount.compute(NodeEnvironment.shardStatePathToDataPath(result2.getDataPath()), (k, v) -> v == null ? 1 : v + 1);
        shardId = new ShardId("index2", "uid2", 2);
        ShardPath result3 = ShardPath.selectNewPathForShard(nodeEnv, shardId, idxSettings, 100, dataPathToShardCount);
        createFakeShard(result3);
        // 2 shards go to 'b' and 1 to 'a'
        assertThat(result1.getDataPath().toString(), containsString(bPathPart));
        assertThat(result2.getDataPath().toString(), containsString(aPathPart));
        assertThat(result3.getDataPath().toString(), containsString(bPathPart));

        nodeEnv.close();
    }
}
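Note: the ShardPath tests above exercise two ideas: pick the data path with the most usable space, and use the per-path counts in dataPathToShardCount to spread shards of the same index. A simplified, self-contained sketch of the free-space comparison only; the types and names below are stand-ins, not the ShardPath implementation:

    import java.util.Comparator;
    import java.util.List;

    final class MostFreeSpaceSketch {
        static final class FakePath {
            final String name;
            final long usableSpaceBytes;
            FakePath(String name, long usableSpaceBytes) {
                this.name = name;
                this.usableSpaceBytes = usableSpaceBytes;
            }
        }

        // Mirrors testGettingPathWithMostFreeSpace: the path with the larger
        // usable space wins (100000 beats 1000; 20000 beats 10000).
        static FakePath pathWithMostFreeSpace(List<FakePath> paths) {
            return paths.stream()
                    .max(Comparator.comparingLong(p -> p.usableSpaceBytes))
                    .orElseThrow(IllegalArgumentException::new);
        }
    }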
@@ -56,7 +56,7 @@ import org.elasticsearch.test.IndexSettingsModule;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPool.Cancellable;
import org.elasticsearch.threadpool.Scheduler.Cancellable;
import org.elasticsearch.threadpool.ThreadPool.Names;
import org.junit.After;
import org.junit.Before;
@@ -35,7 +35,7 @@ import org.elasticsearch.index.shard.IndexShardTestCase;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPool.Cancellable;
import org.elasticsearch.threadpool.Scheduler.Cancellable;

import java.io.IOException;
import java.util.ArrayList;
@@ -36,11 +36,16 @@ import org.elasticsearch.action.ingest.SimulateDocumentBaseResult;
import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineResponse;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.action.support.replication.TransportReplicationActionTests;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESIntegTestCase;

import java.util.Arrays;

@@ -169,6 +174,43 @@ public class IngestClientIT extends ESIntegTestCase {
        }
    }

    public void testBulkWithUpsert() throws Exception {
        createIndex("index");

        BytesReference source = jsonBuilder().startObject()
            .field("description", "my_pipeline")
            .startArray("processors")
            .startObject()
            .startObject("test")
            .endObject()
            .endObject()
            .endArray()
            .endObject().bytes();
        PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON);
        client().admin().cluster().putPipeline(putPipelineRequest).get();

        BulkRequest bulkRequest = new BulkRequest();
        IndexRequest indexRequest = new IndexRequest("index", "type", "1").setPipeline("_id");
        indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "val1");
        bulkRequest.add(indexRequest);
        UpdateRequest updateRequest = new UpdateRequest("index", "type", "2");
        updateRequest.doc("{}", Requests.INDEX_CONTENT_TYPE);
        updateRequest.upsert("{\"field1\":\"upserted_val\"}", XContentType.JSON).upsertRequest().setPipeline("_id");
        bulkRequest.add(updateRequest);

        BulkResponse response = client().bulk(bulkRequest).actionGet();

        assertThat(response.getItems().length, equalTo(bulkRequest.requests().size()));
        Map<String, Object> inserted = client().prepareGet("index", "type", "1")
            .get().getSourceAsMap();
        assertThat(inserted.get("field1"), equalTo("val1"));
        assertThat(inserted.get("processed"), equalTo(true));
        Map<String, Object> upserted = client().prepareGet("index", "type", "2")
            .get().getSourceAsMap();
        assertThat(upserted.get("field1"), equalTo("upserted_val"));
        assertThat(upserted.get("processed"), equalTo(true));
    }

    public void test() throws Exception {
        BytesReference source = jsonBuilder().startObject()
            .field("description", "my_pipeline")
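Note: testBulkWithUpsert asserts that the upserted document, not only the plainly indexed one, carries the pipeline-added processed flag. A condensed sketch of the client-side pattern, using the same calls as the test (index, type, and pipeline id are the test's own values; building the request alone does not execute it):

    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.action.index.IndexRequest;
    import org.elasticsearch.action.update.UpdateRequest;
    import org.elasticsearch.common.xcontent.XContentType;

    final class BulkUpsertPipelineSketch {
        static BulkRequest buildRequest() {
            BulkRequest bulkRequest = new BulkRequest();
            // Plain index request routed through the "_id" pipeline.
            IndexRequest indexRequest = new IndexRequest("index", "type", "1").setPipeline("_id");
            indexRequest.source(XContentType.JSON, "field1", "val1");
            bulkRequest.add(indexRequest);
            // Update with an upsert document; the upsert document runs through the pipeline too.
            UpdateRequest updateRequest = new UpdateRequest("index", "type", "2");
            updateRequest.doc("{}", XContentType.JSON);
            updateRequest.upsert("{\"field1\":\"upserted_val\"}", XContentType.JSON)
                    .upsertRequest()
                    .setPipeline("_id");
            bulkRequest.add(updateRequest);
            return bulkRequest;
        }
    }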
@@ -22,9 +22,9 @@ package org.elasticsearch.monitor.jvm;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.Scheduler.Cancellable;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPool.Cancellable;

import java.util.AbstractMap;
import java.util.HashSet;
@@ -19,6 +19,7 @@

package org.elasticsearch.plugins;

import org.apache.lucene.util.Constants;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;

@@ -27,7 +28,9 @@ import org.elasticsearch.index.IndexModule;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.nio.file.FileSystemException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collection;

@@ -36,6 +39,7 @@ import java.util.Locale;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.hasToString;
import static org.hamcrest.Matchers.instanceOf;

@LuceneTestCase.SuppressFileSystems(value = "ExtrasFS")
public class PluginsServiceTests extends ESTestCase {

@@ -124,6 +128,32 @@ public class PluginsServiceTests extends ESTestCase {
        assertThat(e, hasToString(containsString(expected)));
    }

    public void testDesktopServicesStoreFiles() throws IOException {
        final Path home = createTempDir();
        final Settings settings =
            Settings.builder()
                .put(Environment.PATH_HOME_SETTING.getKey(), home)
                .build();
        final Path plugins = home.resolve("plugins");
        Files.createDirectories(plugins);
        final Path desktopServicesStore = plugins.resolve(".DS_Store");
        Files.createFile(desktopServicesStore);
        if (Constants.MAC_OS_X) {
            @SuppressWarnings("unchecked") final PluginsService pluginsService = newPluginsService(settings);
            assertNotNull(pluginsService);
        } else {
            final IllegalStateException e = expectThrows(IllegalStateException.class, () -> newPluginsService(settings));
            assertThat(e, hasToString(containsString("Could not load plugin descriptor for existing plugin [.DS_Store]")));
            assertNotNull(e.getCause());
            assertThat(e.getCause(), instanceOf(FileSystemException.class));
            if (Constants.WINDOWS) {
                assertThat(e.getCause(), instanceOf(NoSuchFileException.class));
            } else {
                assertThat(e.getCause(), hasToString(containsString("Not a directory")));
            }
        }
    }

    public void testStartupWithRemovingMarker() throws IOException {
        final Path home = createTempDir();
        final Settings settings =
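Note: the new test expects a stray Finder .DS_Store file in the plugins directory to be tolerated on macOS while still failing plugin loading on other platforms. A hypothetical check along those lines (the method name and its placement are assumptions, not the PluginsService code):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.apache.lucene.util.Constants;

    final class DesktopServicesStoreSketch {
        // True only for the macOS Finder metadata file, which plugin discovery may skip.
        static boolean isDesktopServicesStore(Path candidate) {
            return Constants.MAC_OS_X
                    && Files.isRegularFile(candidate)
                    && ".DS_Store".equals(candidate.getFileName().toString());
        }
    }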
@@ -0,0 +1,76 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.rest.action.document;

import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.Version;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;
import org.hamcrest.CustomMatcher;
import org.mockito.Mockito;

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.argThat;
import static org.mockito.Mockito.mock;

/**
 * Tests for {@link RestBulkAction}.
 */
public class RestBulkActionTests extends ESTestCase {

    public void testBulkPipelineUpsert() throws Exception {
        final NodeClient mockClient = mock(NodeClient.class);
        final Map<String, String> params = new HashMap<>();
        params.put("pipeline", "timestamps");
        new RestBulkAction(settings(Version.CURRENT).build(), mock(RestController.class))
            .handleRequest(
                new FakeRestRequest.Builder(
                    xContentRegistry()).withPath("my_index/my_type/_bulk").withParams(params)
                    .withContent(
                        new BytesArray(
                            "{\"index\":{\"_id\":\"1\"}}\n" +
                            "{\"field1\":\"val1\"}\n" +
                            "{\"update\":{\"_id\":\"2\"}}\n" +
                            "{\"script\":{\"source\":\"ctx._source.counter++;\"},\"upsert\":{\"field1\":\"upserted_val\"}}\n"
                        ),
                        XContentType.JSON
                    ).withMethod(RestRequest.Method.POST).build(),
                mock(RestChannel.class), mockClient
            );
        Mockito.verify(mockClient)
            .bulk(argThat(new CustomMatcher<BulkRequest>("Pipeline in upsert request") {
                @Override
                public boolean matches(final Object item) {
                    BulkRequest request = (BulkRequest) item;
                    UpdateRequest update = (UpdateRequest) request.requests().get(1);
                    return "timestamps".equals(update.upsertRequest().getPipeline());
                }
            }), any());
    }
}
@@ -729,7 +729,7 @@ public class TopHitsIT extends ESIntegTestCase {
        assertThat(searchHits.getTotalHits(), equalTo(1L));
        assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
        assertThat(extractValue("comments.date", searchHits.getAt(0).getSourceAsMap()), equalTo(1));
        assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(1));

        bucket = terms.getBucketByKey("b");
        assertThat(bucket.getDocCount(), equalTo(2L));

@@ -738,10 +738,10 @@ public class TopHitsIT extends ESIntegTestCase {
        assertThat(searchHits.getTotalHits(), equalTo(2L));
        assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1));
        assertThat(extractValue("comments.date", searchHits.getAt(0).getSourceAsMap()), equalTo(2));
        assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(2));
        assertThat(searchHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(searchHits.getAt(1).getNestedIdentity().getOffset(), equalTo(0));
        assertThat(extractValue("comments.date", searchHits.getAt(1).getSourceAsMap()), equalTo(3));
        assertThat(extractValue("date", searchHits.getAt(1).getSourceAsMap()), equalTo(3));

        bucket = terms.getBucketByKey("c");
        assertThat(bucket.getDocCount(), equalTo(1L));

@@ -750,7 +750,7 @@ public class TopHitsIT extends ESIntegTestCase {
        assertThat(searchHits.getTotalHits(), equalTo(1L));
        assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1));
        assertThat(extractValue("comments.date", searchHits.getAt(0).getSourceAsMap()), equalTo(4));
        assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(4));
    }

    public void testTopHitsInSecondLayerNested() throws Exception {

@@ -803,49 +803,49 @@ public class TopHitsIT extends ESIntegTestCase {
        assertThat(topReviewers.getHits().getHits().length, equalTo(7));

        assertThat(topReviewers.getHits().getAt(0).getId(), equalTo("1"));
        assertThat(extractValue("comments.reviewers.name", topReviewers.getHits().getAt(0).getSourceAsMap()), equalTo("user a"));
        assertThat(extractValue("name", topReviewers.getHits().getAt(0).getSourceAsMap()), equalTo("user a"));
        assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(0));
        assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
        assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0));

        assertThat(topReviewers.getHits().getAt(1).getId(), equalTo("1"));
        assertThat(extractValue("comments.reviewers.name", topReviewers.getHits().getAt(1).getSourceAsMap()), equalTo("user b"));
        assertThat(extractValue("name", topReviewers.getHits().getAt(1).getSourceAsMap()), equalTo("user b"));
        assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getOffset(), equalTo(0));
        assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
        assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getOffset(), equalTo(1));

        assertThat(topReviewers.getHits().getAt(2).getId(), equalTo("1"));
        assertThat(extractValue("comments.reviewers.name", topReviewers.getHits().getAt(2).getSourceAsMap()), equalTo("user c"));
        assertThat(extractValue("name", topReviewers.getHits().getAt(2).getSourceAsMap()), equalTo("user c"));
        assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(0));
        assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
        assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getOffset(), equalTo(2));

        assertThat(topReviewers.getHits().getAt(3).getId(), equalTo("1"));
        assertThat(extractValue("comments.reviewers.name", topReviewers.getHits().getAt(3).getSourceAsMap()), equalTo("user c"));
        assertThat(extractValue("name", topReviewers.getHits().getAt(3).getSourceAsMap()), equalTo("user c"));
        assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(1));
        assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
        assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getOffset(), equalTo(0));

        assertThat(topReviewers.getHits().getAt(4).getId(), equalTo("1"));
        assertThat(extractValue("comments.reviewers.name", topReviewers.getHits().getAt(4).getSourceAsMap()), equalTo("user d"));
        assertThat(extractValue("name", topReviewers.getHits().getAt(4).getSourceAsMap()), equalTo("user d"));
        assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getOffset(), equalTo(1));
        assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
        assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getOffset(), equalTo(1));

        assertThat(topReviewers.getHits().getAt(5).getId(), equalTo("1"));
        assertThat(extractValue("comments.reviewers.name", topReviewers.getHits().getAt(5).getSourceAsMap()), equalTo("user e"));
        assertThat(extractValue("name", topReviewers.getHits().getAt(5).getSourceAsMap()), equalTo("user e"));
        assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getOffset(), equalTo(1));
        assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
        assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getOffset(), equalTo(2));

        assertThat(topReviewers.getHits().getAt(6).getId(), equalTo("2"));
        assertThat(extractValue("comments.reviewers.name", topReviewers.getHits().getAt(6).getSourceAsMap()), equalTo("user f"));
        assertThat(extractValue("name", topReviewers.getHits().getAt(6).getSourceAsMap()), equalTo("user f"));
        assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(0));
        assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));

@@ -901,7 +901,7 @@ public class TopHitsIT extends ESIntegTestCase {
        assertThat(field.getValue().toString(), equalTo("5"));

        assertThat(searchHit.getSourceAsMap().size(), equalTo(1));
        assertThat(extractValue("comments.message", searchHit.getSourceAsMap()), equalTo("some comment"));
        assertThat(extractValue("message", searchHit.getSourceAsMap()), equalTo("some comment"));
    }

    public void testTopHitsInNested() throws Exception {

@@ -934,7 +934,7 @@ public class TopHitsIT extends ESIntegTestCase {
        for (int j = 0; j < 3; j++) {
            assertThat(searchHits.getAt(j).getNestedIdentity().getField().string(), equalTo("comments"));
            assertThat(searchHits.getAt(j).getNestedIdentity().getOffset(), equalTo(0));
            assertThat(extractValue("comments.id", searchHits.getAt(j).getSourceAsMap()), equalTo(0));
            assertThat(extractValue("id", searchHits.getAt(j).getSourceAsMap()), equalTo(0));

            HighlightField highlightField = searchHits.getAt(j).getHighlightFields().get("comments.message");
            assertThat(highlightField.getFragments().length, equalTo(1));
@@ -596,9 +596,9 @@ public class InnerHitsIT extends ESIntegTestCase {
        client().prepareIndex("index1", "message", "1").setSource(jsonBuilder().startObject()
            .field("message", "quick brown fox")
            .startArray("comments")
            .startObject().field("message", "fox eat quick").endObject()
            .startObject().field("message", "fox ate rabbit x y z").endObject()
            .startObject().field("message", "rabbit got away").endObject()
            .startObject().field("message", "fox eat quick").field("x", "y").endObject()
            .startObject().field("message", "fox ate rabbit x y z").field("x", "y").endObject()
            .startObject().field("message", "rabbit got away").field("x", "y").endObject()
            .endArray()
            .endObject()).get();
        refresh();

@@ -614,9 +614,11 @@ public class InnerHitsIT extends ESIntegTestCase {
        assertHitCount(response, 1);

        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits(), equalTo(2L));
        assertThat(extractValue("comments.message", response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap()),
        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(1));
        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"),
            equalTo("fox eat quick"));
        assertThat(extractValue("comments.message", response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap()),
        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().size(), equalTo(1));
        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"),
            equalTo("fox ate rabbit x y z"));

        response = client().prepareSearch()

@@ -627,9 +629,11 @@ public class InnerHitsIT extends ESIntegTestCase {
        assertHitCount(response, 1);

        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits(), equalTo(2L));
        assertThat(extractValue("comments.message", response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap()),
        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2));
        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"),
            equalTo("fox eat quick"));
        assertThat(extractValue("comments.message", response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap()),
        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2));
        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"),
            equalTo("fox ate rabbit x y z"));
    }
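Note: the TopHitsIT and InnerHitsIT changes above reflect that a nested hit's _source map is now relative to the nested object, so fields are read directly ("message") instead of through the nested path ("comments.message"). A short sketch of the new access pattern, using the same response accessors as the tests (variable and method names are illustrative):

    import java.util.Map;
    import org.elasticsearch.action.search.SearchResponse;

    final class NestedInnerHitSourceSketch {
        // Reads the first "comments" inner hit and pulls its own "message" field;
        // the map no longer carries the "comments." prefix.
        static Object firstCommentMessage(SearchResponse response) {
            Map<String, Object> nestedSource = response.getHits().getAt(0)
                    .getInnerHits().get("comments").getAt(0).getSourceAsMap();
            return nestedSource.get("message");
        }
    }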
@@ -472,6 +472,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
            .setQuery(randomizeType(multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill")
                .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                .analyzer("category")
                .lenient(true)
                .operator(Operator.AND))).get();
        assertHitCount(searchResponse, 1L);
        assertFirstHit(searchResponse, hasId("theone"));

@@ -480,6 +481,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
            .setQuery(randomizeType(multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill", "int-field")
                .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                .analyzer("category")
                .lenient(true)
                .operator(Operator.AND))).get();
        assertHitCount(searchResponse, 1L);
        assertFirstHit(searchResponse, hasId("theone"));

@@ -488,6 +490,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
            .setQuery(randomizeType(multiMatchQuery("captain america 15", "skill", "full_name", "first_name", "last_name", "category", "int-field")
                .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                .analyzer("category")
                .lenient(true)
                .operator(Operator.AND))).get();
        assertHitCount(searchResponse, 1L);
        assertFirstHit(searchResponse, hasId("theone"));

@@ -496,6 +499,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
        searchResponse = client().prepareSearch("test")
            .setQuery(randomizeType(multiMatchQuery("captain america 15", "first_name", "last_name", "skill")
                .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                .lenient(true)
                .analyzer("category"))).get();
        assertFirstHit(searchResponse, hasId("theone"));