Merge branch 'master' into ccr

* master:
  Upgrade to Lucene-7.4-snapshot-6705632810 (#30519)
  add version compatibility from 6.4.0 after backport, see #30319 (#30390)
  Security: Simplify security index listeners (#30466)
  Add proper longitude validation in geo_polygon_query (#30497)
  Remove Discovery.AckListener.onTimeout() (#30514)
  Build: move generated-resources to build (#30366)
  Reindex: Fold "with all deps" project into reindex (#30154)
  Isolate REST client single host tests (#30504)
  Solve Gradle deprecation warnings around shadowJar (#30483)
  SAML: Process only signed data (#30420)
  Remove BWC repository test (#30500)
  Build: Remove xpack specific run task (#30487)
  AwaitsFix IntegTestZipClientYamlTestSuiteIT#indices.split tests
  LLClient: Add setJsonEntity (#30447)
  Expose CommonStatsFlags directly in IndicesStatsRequest. (#30163)
  Silence IndexUpgradeIT test failures. (#30430)
  Bump Gradle heap to 1792m (#30484)
  [docs] add warning for read-write indices in force merge documentation (#28869)
  Avoid deadlocks in cache (#30461)
  Test: remove hardcoded list of unconfigured ciphers (#30367)
  mute SplitIndexIT due to https://github.com/elastic/elasticsearch/issues/30416
  Docs: Test examples that recreate lang analyzers  (#29535)
  BulkProcessor to retry based on status code (#29329)
  Add GET Repository High Level REST API (#30362)
  add a comment explaining the need for RetryOnReplicaException on missing mappings
  Add `coordinating_only` node selector (#30313)
  Stop forking groovyc (#30471)
  Avoid setting connection request timeout (#30384)
  Use date format in `date_range` mapping before fallback to default (#29310)
  Watcher: Increase HttpClient parallel sent requests (#30130)

# Conflicts:
#	x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java
Nhat Nguyen 2018-05-10 13:04:08 -04:00
commit a5be4149a3
231 changed files with 2050 additions and 1435 deletions

View File

@@ -24,22 +24,22 @@ buildscript {
   }
   dependencies {
-    classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.2'
+    classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.4'
   }
 }

 apply plugin: 'elasticsearch.build'
-// build an uberjar with all benchmarks
-apply plugin: 'com.github.johnrengelman.shadow'
-// have the shadow plugin provide the runShadow task
-apply plugin: 'application'
+
+// order of this section matters, see: https://github.com/johnrengelman/shadow/issues/336
+apply plugin: 'application' // have the shadow plugin provide the runShadow task
+mainClassName = 'org.openjdk.jmh.Main'
+apply plugin: 'com.github.johnrengelman.shadow' // build an uberjar with all benchmarks

 // Not published so no need to assemble
 tasks.remove(assemble)
 build.dependsOn.remove('assemble')

 archivesBaseName = 'elasticsearch-benchmarks'
-mainClassName = 'org.openjdk.jmh.Main'

 test.enabled = false

View File

@@ -535,10 +535,15 @@ class BuildPlugin implements Plugin<Project> {
             }
             // also apply release flag to groovy, which is used in build-tools
             project.tasks.withType(GroovyCompile) {
-                final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(it.targetCompatibility)
-                options.fork = true
-                options.forkOptions.javaHome = new File(project.compilerJavaHome)
-                options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion
+                final compilerJavaHomeFile = new File(project.compilerJavaHome)
+                // we only fork if the Gradle JDK is not the same as the compiler JDK
+                if (compilerJavaHomeFile.canonicalPath == Jvm.current().javaHome.canonicalPath) {
+                    options.fork = false
+                } else {
+                    options.fork = true
+                    options.forkOptions.javaHome = compilerJavaHomeFile
+                    options.compilerArgs << '--release' << JavaVersion.toVersion(it.targetCompatibility).majorVersion
+                }
             }
         }
     }
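
For context, the fork decision above reduces to a canonical-path comparison between the configured compiler JDK and the JVM Gradle itself runs on. A minimal Java sketch of the same check, where the compiler JDK path is a hypothetical stand-in:

import java.io.File;
import java.io.IOException;

public class ForkDecision {
    public static void main(String[] args) throws IOException {
        // Hypothetical compiler JDK configured for the build.
        File compilerJavaHome = new File("/usr/lib/jvm/java-10");
        // The JDK the current JVM (i.e. Gradle) is running on.
        File currentJavaHome = new File(System.getProperty("java.home"));
        // Canonical paths resolve symlinks, so two spellings of the same JDK compare equal.
        boolean fork = !compilerJavaHome.getCanonicalPath().equals(currentJavaHome.getCanonicalPath());
        System.out.println(fork ? "fork a separate compiler JVM" : "compile in-process");
    }
}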

View File

@@ -141,9 +141,11 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
     private static final String SYNTAX = {
         String method = /(?<method>GET|PUT|POST|HEAD|OPTIONS|DELETE)/
         String pathAndQuery = /(?<pathAndQuery>[^\n]+)/
-        String badBody = /GET|PUT|POST|HEAD|OPTIONS|DELETE|#/
+        String badBody = /GET|PUT|POST|HEAD|OPTIONS|DELETE|startyaml|#/
         String body = /(?<body>(?:\n(?!$badBody)[^\n]+)+)/
-        String nonComment = /$method\s+$pathAndQuery$body?/
+        String rawRequest = /(?:$method\s+$pathAndQuery$body?)/
+        String yamlRequest = /(?:startyaml(?s)(?<yaml>.+?)(?-s)endyaml)/
+        String nonComment = /(?:$rawRequest|$yamlRequest)/
         String comment = /(?<comment>#.+)/
         /(?:$comment|$nonComment)\n+/
     }()
@@ -333,6 +335,11 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
                 // Comment
                 return
             }
+            String yamlRequest = matcher.group("yaml");
+            if (yamlRequest != null) {
+                current.println(yamlRequest)
+                return
+            }
             String method = matcher.group("method")
             String pathAndQuery = matcher.group("pathAndQuery")
             String body = matcher.group("body")
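
To make the new yamlRequest alternative concrete: a standalone Java sketch of how a pattern of that shape captures everything between the startyaml and endyaml markers (the sample block is the thai example from docs/README.asciidoc):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class YamlBlockDemo {
    public static void main(String[] args) {
        // (?s) lets . match newlines inside the block; (?-s) switches that off again.
        Pattern pattern = Pattern.compile("startyaml(?s)(?<yaml>.+?)(?-s)endyaml");
        String snippet = "startyaml\n  - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}\nendyaml";
        Matcher matcher = pattern.matcher(snippet);
        if (matcher.find()) {
            // Prints the raw yaml between the markers, newlines included.
            System.out.println(matcher.group("yaml"));
        }
    }
}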

View File

@@ -1,5 +1,5 @@
 elasticsearch = 7.0.0-alpha1
-lucene = 7.4.0-snapshot-1ed95c097b
+lucene = 7.4.0-snapshot-6705632810

 # optional dependencies
 spatial4j = 0.7

View File

@@ -29,6 +29,7 @@ import org.apache.http.entity.ByteArrayEntity;
 import org.apache.http.entity.ContentType;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
 import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
@@ -644,6 +645,17 @@ final class RequestConverters {
         return request;
     }

+    static Request getRepositories(GetRepositoriesRequest getRepositoriesRequest) {
+        String[] repositories = getRepositoriesRequest.repositories() == null ? Strings.EMPTY_ARRAY : getRepositoriesRequest.repositories();
+        String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot").addCommaSeparatedPathParts(repositories).build();
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
+
+        Params parameters = new Params(request);
+        parameters.withMasterTimeout(getRepositoriesRequest.masterNodeTimeout());
+        parameters.withLocal(getRepositoriesRequest.local());
+        return request;
+    }
+
     static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException {
         String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build();
         Request request = new Request(HttpPut.METHOD_NAME, endpoint);
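
The endpoint built by the getRepositories converter above takes one of two shapes, which the testGetRepositories case in RequestConvertersTests further down asserts. A trivial Java illustration:

public class SnapshotEndpointShapes {
    public static void main(String[] args) {
        // No repositories requested -> all registered repositories.
        System.out.println("/_snapshot");
        // Specific repositories -> names joined with commas into a single path segment.
        System.out.println("/_snapshot/" + String.join(",", "a", "b", "c"));
    }
}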

View File

@@ -26,6 +26,8 @@ import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.delete.DeleteRequest;
@@ -189,6 +191,7 @@ public class RestHighLevelClient implements Closeable {

     private final IndicesClient indicesClient = new IndicesClient(this);
     private final ClusterClient clusterClient = new ClusterClient(this);
+    private final SnapshotClient snapshotClient = new SnapshotClient(this);

     /**
      * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the
@@ -252,6 +255,15 @@ public class RestHighLevelClient implements Closeable {
         return clusterClient;
     }

+    /**
+     * Provides a {@link SnapshotClient} which can be used to access the Snapshot API.
+     *
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html">Snapshot API on elastic.co</a>
+     */
+    public final SnapshotClient snapshot() {
+        return snapshotClient;
+    }
+
     /**
      * Executes a bulk request using the Bulk API
      *

View File

@@ -0,0 +1,70 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client;

import org.apache.http.Header;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;

import java.io.IOException;

import static java.util.Collections.emptySet;

/**
 * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Snapshot API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html">Snapshot API on elastic.co</a>
 */
public final class SnapshotClient {
    private final RestHighLevelClient restHighLevelClient;

    SnapshotClient(RestHighLevelClient restHighLevelClient) {
        this.restHighLevelClient = restHighLevelClient;
    }

    /**
     * Gets a list of snapshot repositories. If the list of repositories is empty or it contains a single element "_all", all
     * registered repositories are returned.
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
     * API on elastic.co</a>
     */
    public GetRepositoriesResponse getRepositories(GetRepositoriesRequest getRepositoriesRequest, Header... headers)
            throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories,
            GetRepositoriesResponse::fromXContent, emptySet(), headers);
    }

    /**
     * Asynchronously gets a list of snapshot repositories. If the list of repositories is empty or it contains a single element "_all", all
     * registered repositories are returned.
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
     * API on elastic.co</a>
     */
    public void getRepositoriesAsync(GetRepositoriesRequest getRepositoriesRequest,
                                     ActionListener<GetRepositoriesResponse> listener, Header... headers) {
        restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories,
            GetRepositoriesResponse::fromXContent, listener, emptySet(), headers);
    }
}
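
A minimal usage sketch for this new client surface, assuming a RestHighLevelClient built elsewhere; the repository name "my_backup" is purely illustrative:

import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;

import java.io.IOException;

public class GetRepositoriesExample {
    static void printRepositories(RestHighLevelClient client) throws IOException {
        // An empty request returns every registered repository, per the javadoc above.
        GetRepositoriesRequest request = new GetRepositoriesRequest();
        // Or restrict the lookup to specific repositories (a made-up name here):
        request.repositories(new String[]{"my_backup"});
        GetRepositoriesResponse response = client.snapshot().getRepositories(request);
        for (RepositoryMetaData repository : response.repositories()) {
            System.out.println(repository.name() + " -> " + repository.type());
        }
    }
}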

View File

@@ -0,0 +1,219 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client;

import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestStatus;

import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

public class BulkProcessorRetryIT extends ESRestHighLevelClientTestCase {

    private static final String INDEX_NAME = "index";
    private static final String TYPE_NAME = "type";

    private static BulkProcessor.Builder initBulkProcessorBuilder(BulkProcessor.Listener listener) {
        return BulkProcessor.builder(highLevelClient()::bulkAsync, listener);
    }

    public void testBulkRejectionLoadWithoutBackoff() throws Exception {
        boolean rejectedExecutionExpected = true;
        executeBulkRejectionLoad(BackoffPolicy.noBackoff(), rejectedExecutionExpected);
    }

    public void testBulkRejectionLoadWithBackoff() throws Throwable {
        boolean rejectedExecutionExpected = false;
        executeBulkRejectionLoad(BackoffPolicy.exponentialBackoff(), rejectedExecutionExpected);
    }

    private void executeBulkRejectionLoad(BackoffPolicy backoffPolicy, boolean rejectedExecutionExpected) throws Exception {
        final CorrelatingBackoffPolicy internalPolicy = new CorrelatingBackoffPolicy(backoffPolicy);
        final int numberOfAsyncOps = randomIntBetween(600, 700);
        final CountDownLatch latch = new CountDownLatch(numberOfAsyncOps);
        final Set<Object> responses = Collections.newSetFromMap(new ConcurrentHashMap<>());

        BulkProcessor bulkProcessor = initBulkProcessorBuilder(new BulkProcessor.Listener() {
            @Override
            public void beforeBulk(long executionId, BulkRequest request) {
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                internalPolicy.logResponse(response);
                responses.add(response);
                latch.countDown();
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                responses.add(failure);
                latch.countDown();
            }
        }).setBulkActions(1)
            .setConcurrentRequests(randomIntBetween(0, 100))
            .setBackoffPolicy(internalPolicy)
            .build();

        MultiGetRequest multiGetRequest = indexDocs(bulkProcessor, numberOfAsyncOps);
        latch.await(10, TimeUnit.SECONDS);
        bulkProcessor.close();

        assertEquals(responses.size(), numberOfAsyncOps);

        boolean rejectedAfterAllRetries = false;
        for (Object response : responses) {
            if (response instanceof BulkResponse) {
                BulkResponse bulkResponse = (BulkResponse) response;
                for (BulkItemResponse bulkItemResponse : bulkResponse.getItems()) {
                    if (bulkItemResponse.isFailed()) {
                        BulkItemResponse.Failure failure = bulkItemResponse.getFailure();
                        if (failure.getStatus() == RestStatus.TOO_MANY_REQUESTS) {
                            if (rejectedExecutionExpected == false) {
                                Iterator<TimeValue> backoffState = internalPolicy.backoffStateFor(bulkResponse);
                                assertNotNull("backoffState is null (indicates a bulk request got rejected without retry)", backoffState);
                                if (backoffState.hasNext()) {
                                    // we're not expecting that we overwhelmed it even once when we maxed out the number of retries
                                    throw new AssertionError("Got rejected although backoff policy would allow more retries",
                                        failure.getCause());
                                } else {
                                    rejectedAfterAllRetries = true;
                                    logger.debug("We maxed out the number of bulk retries and got rejected (this is ok).");
                                }
                            }
                        } else {
                            throw new AssertionError("Unexpected failure with status: " + failure.getStatus());
                        }
                    }
                }
            } else {
                Throwable t = (Throwable) response;
                // we're not expecting any other errors
                throw new AssertionError("Unexpected failure", t);
            }
        }

        highLevelClient().indices().refresh(new RefreshRequest());
        int multiGetResponsesCount = highLevelClient().multiGet(multiGetRequest).getResponses().length;

        if (rejectedExecutionExpected) {
            assertThat(multiGetResponsesCount, lessThanOrEqualTo(numberOfAsyncOps));
        } else if (rejectedAfterAllRetries) {
            assertThat(multiGetResponsesCount, lessThan(numberOfAsyncOps));
        } else {
            assertThat(multiGetResponsesCount, equalTo(numberOfAsyncOps));
        }
    }

    private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) {
        MultiGetRequest multiGetRequest = new MultiGetRequest();
        for (int i = 1; i <= numDocs; i++) {
            processor.add(new IndexRequest(INDEX_NAME, TYPE_NAME, Integer.toString(i))
                .source(XContentType.JSON, "field", randomRealisticUnicodeOfCodepointLengthBetween(1, 30)));
            multiGetRequest.add(INDEX_NAME, TYPE_NAME, Integer.toString(i));
        }
        return multiGetRequest;
    }

    /**
     * Internal helper class to correlate backoff states with bulk responses. This is needed to check whether we maxed out the number
     * of retries but still got rejected (which is perfectly fine and can also happen from time to time under heavy load).
     *
     * This implementation relies on an implementation detail in Retry, namely that the bulk listener is notified on the same thread
     * as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code.
     */
    private static class CorrelatingBackoffPolicy extends BackoffPolicy {
        private final Map<BulkResponse, Iterator<TimeValue>> correlations = new ConcurrentHashMap<>();
        // this is intentionally *not* static final. We will only ever have one instance of this class per test case and want the
        // thread local to be eligible for garbage collection right after the test to avoid leaks.
        private final ThreadLocal<Iterator<TimeValue>> iterators = new ThreadLocal<>();

        private final BackoffPolicy delegate;

        private CorrelatingBackoffPolicy(BackoffPolicy delegate) {
            this.delegate = delegate;
        }

        public Iterator<TimeValue> backoffStateFor(BulkResponse response) {
            return correlations.get(response);
        }

        // Assumption: This method is called from the same thread as the last call to the internal iterator's #hasNext() / #next()
        // see also Retry.AbstractRetryHandler#onResponse().
        public void logResponse(BulkResponse response) {
            Iterator<TimeValue> iterator = iterators.get();
            // did we ever retry?
            if (iterator != null) {
                // we should correlate any iterator only once
                iterators.remove();
                correlations.put(response, iterator);
            }
        }

        @Override
        public Iterator<TimeValue> iterator() {
            return new CorrelatingIterator(iterators, delegate.iterator());
        }

        private static class CorrelatingIterator implements Iterator<TimeValue> {
            private final Iterator<TimeValue> delegate;
            private final ThreadLocal<Iterator<TimeValue>> iterators;

            private CorrelatingIterator(ThreadLocal<Iterator<TimeValue>> iterators, Iterator<TimeValue> delegate) {
                this.iterators = iterators;
                this.delegate = delegate;
            }

            @Override
            public boolean hasNext() {
                // update on every invocation as we might get rescheduled on a different thread. Unfortunately, there is a chance that
                // we pollute the thread local map with stale values. Due to the implementation of Retry and the life cycle of the
                // enclosing class CorrelatingBackoffPolicy this should not pose a major problem though.
                iterators.set(this);
                return delegate.hasNext();
            }

            @Override
            public TimeValue next() {
                // update on every invocation
                iterators.set(this);
                return delegate.next();
            }
        }
    }
}
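
The production-side knob this test exercises is the backoff policy on BulkProcessor. A minimal configuration sketch, assuming an existing RestHighLevelClient; the 100ms initial delay and 3 retries are illustrative values, not ones taken from the test:

import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.TimeValue;

public class RetryingBulkProcessorExample {
    static BulkProcessor build(RestHighLevelClient client, BulkProcessor.Listener listener) {
        return BulkProcessor.builder(client::bulkAsync, listener)
            // Retries bulk requests rejected with 429 TOO_MANY_REQUESTS, waiting 100ms
            // before the first retry and exponentially longer before each later one.
            .setBackoffPolicy(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3))
            .build();
    }
}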

View File

@@ -29,6 +29,7 @@ import org.apache.http.entity.ByteArrayEntity;
 import org.apache.http.util.EntityUtils;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
@@ -1427,6 +1428,26 @@ public class RequestConvertersTests extends ESTestCase {
         assertEquals(expectedParams, request.getParameters());
     }

+    public void testGetRepositories() {
+        Map<String, String> expectedParams = new HashMap<>();
+        StringBuilder endpoint = new StringBuilder("/_snapshot");
+
+        GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest();
+        setRandomMasterTimeout(getRepositoriesRequest, expectedParams);
+        setRandomLocal(getRepositoriesRequest, expectedParams);
+
+        if (randomBoolean()) {
+            String[] entries = new String[] {"a", "b", "c"};
+            getRepositoriesRequest.repositories(entries);
+            endpoint.append("/" + String.join(",", entries));
+        }
+
+        Request request = RequestConverters.getRepositories(getRepositoriesRequest);
+        assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
+        assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod()));
+        assertThat(expectedParams, equalTo(request.getParameters()));
+    }
+
     public void testPutTemplateRequest() throws Exception {
         Map<String, String> names = new HashMap<>();
         names.put("log", "log");
View File

@@ -0,0 +1,82 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.Collections;

import static org.hamcrest.Matchers.equalTo;

public class SnapshotIT extends ESRestHighLevelClientTestCase {

    public void testModulesGetRepositoriesUsingParams() throws IOException {
        String repository = "test";
        String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}";
        highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository, Collections.emptyMap(),
            new StringEntity(repositorySettings, ContentType.APPLICATION_JSON));

        highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository + "_other", Collections.emptyMap(),
            new StringEntity(repositorySettings, ContentType.APPLICATION_JSON));

        {
            GetRepositoriesRequest request = new GetRepositoriesRequest();
            request.repositories(new String[]{repository});
            GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories,
                highLevelClient().snapshot()::getRepositoriesAsync);
            assertThat(1, equalTo(response.repositories().size()));
        }
        {
            GetRepositoriesRequest request = new GetRepositoriesRequest();
            GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories,
                highLevelClient().snapshot()::getRepositoriesAsync);
            assertThat(2, equalTo(response.repositories().size()));
        }
    }

    public void testModulesGetDefaultRepositories() throws IOException {
        String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}";
        GetRepositoriesRequest request = new GetRepositoriesRequest();

        highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/test", Collections.emptyMap(),
            new StringEntity(repositorySettings, ContentType.APPLICATION_JSON));

        GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories,
            highLevelClient().snapshot()::getRepositoriesAsync);
        assertThat(1, equalTo(response.repositories().size()));
    }

    public void testModulesGetRepositoriesNonExistent() throws IOException {
        String repository = "doesnotexist";
        GetRepositoriesRequest request = new GetRepositoriesRequest(new String[]{repository});
        ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(request,
            highLevelClient().snapshot()::getRepositories, highLevelClient().snapshot()::getRepositoriesAsync));
        assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));
        assertThat(exception.getMessage(), equalTo(
            "Elasticsearch exception [type=repository_missing_exception, reason=[" + repository + "] missing]"));
    }
}
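
The repository registration above still goes through the older multi-argument performRequest. With the Request object methods and setJsonEntity from this same merge, an equivalent low-level call could look like the following sketch (repository name and location are placeholders):

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.io.IOException;

public class RegisterRepositoryExample {
    static Response registerFsRepository(RestClient lowLevelClient) throws IOException {
        Request request = new Request("PUT", "/_snapshot/test");
        // Sends the body as application/json without building an HttpEntity by hand.
        request.setJsonEntity("{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}");
        return lowLevelClient.performRequest(request);
    }
}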

View File

@@ -0,0 +1,135 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client.documentation;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.unit.TimeValue;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.hamcrest.Matchers.equalTo;

/**
 * This class is used to generate the Java Snapshot API documentation.
 * You need to wrap your code between two tags like:
 * // tag::example
 * // end::example
 *
 * Where example is your tag name.
 *
 * Then in the documentation, you can extract what is between tag and end tags with
 * ["source","java",subs="attributes,callouts,macros"]
 * --------------------------------------------------
 * include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[example]
 * --------------------------------------------------
 *
 * The column width of the code block is 84. If the code contains a line longer
 * than 84, the line will be cut and a horizontal scroll bar will be displayed.
 * (the code indentation of the tag is not included in the width)
 */
public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase {

    private static final String testRepository = "test_repository";

    public void testSnapshotGetRepository() throws IOException {
        RestHighLevelClient client = highLevelClient();

        createTestRepositories();

        // tag::get-repository-request
        GetRepositoriesRequest request = new GetRepositoriesRequest();
        // end::get-repository-request

        // tag::get-repository-request-repositories
        String [] repositories = new String[] { testRepository };
        request.repositories(repositories); // <1>
        // end::get-repository-request-repositories

        // tag::get-repository-request-local
        request.local(true); // <1>
        // end::get-repository-request-local

        // tag::get-repository-request-masterTimeout
        request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
        request.masterNodeTimeout("1m"); // <2>
        // end::get-repository-request-masterTimeout

        // tag::get-repository-execute
        GetRepositoriesResponse response = client.snapshot().getRepositories(request);
        // end::get-repository-execute

        // tag::get-repository-response
        List<RepositoryMetaData> repositoryMetaDataResponse = response.repositories();
        // end::get-repository-response
        assertThat(1, equalTo(repositoryMetaDataResponse.size()));
        assertThat(testRepository, equalTo(repositoryMetaDataResponse.get(0).name()));
    }

    public void testSnapshotGetRepositoryAsync() throws InterruptedException {
        RestHighLevelClient client = highLevelClient();
        {
            GetRepositoriesRequest request = new GetRepositoriesRequest();

            // tag::get-repository-execute-listener
            ActionListener<GetRepositoriesResponse> listener =
                    new ActionListener<GetRepositoriesResponse>() {
                @Override
                public void onResponse(GetRepositoriesResponse getRepositoriesResponse) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
            // end::get-repository-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::get-repository-execute-async
            client.snapshot().getRepositoriesAsync(request, listener); // <1>
            // end::get-repository-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    private void createTestRepositories() throws IOException {
        RestHighLevelClient client = highLevelClient();
        String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}";
        highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + testRepository, Collections.emptyMap(),
            new StringEntity(repositorySettings, ContentType.APPLICATION_JSON));
    }
}

View File

@@ -19,8 +19,10 @@
 package org.elasticsearch.client;

+import org.apache.http.entity.ContentType;
 import org.apache.http.Header;
 import org.apache.http.HttpEntity;
+import org.apache.http.nio.entity.NStringEntity;
 import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;

 import java.util.Arrays;
@@ -103,6 +105,17 @@ public final class Request {
         this.entity = entity;
     }

+    /**
+     * Set the body of the request to a string. If not set or set to
+     * {@code null} then no body is sent with the request. The
+     * {@code Content-Type} will be sent as {@code application/json}.
+     * If you need a different content type then use
+     * {@link #setEntity(HttpEntity)}.
+     */
+    public void setJsonEntity(String entity) {
+        setEntity(entity == null ? null : new NStringEntity(entity, ContentType.APPLICATION_JSON));
+    }
+
     /**
      * The body of the request. If {@code null} then no body
      * is sent with the request.
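
A short usage sketch of the new method, against a hypothetical endpoint:

import org.elasticsearch.client.Request;

public class SetJsonEntityExample {
    public static void main(String[] args) {
        Request request = new Request("POST", "/hypothetical-index/_doc");
        // The entity is created with ContentType.APPLICATION_JSON, so the
        // Content-Type header is set automatically.
        request.setJsonEntity("{\"field\":\"value\"}");
        // Passing null removes a previously set body again.
        request.setJsonEntity(null);
    }
}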

View File

@@ -43,7 +43,6 @@ public final class RestClientBuilder {
     public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000;
     public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 30000;
     public static final int DEFAULT_MAX_RETRY_TIMEOUT_MILLIS = DEFAULT_SOCKET_TIMEOUT_MILLIS;
-    public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS = 500;
     public static final int DEFAULT_MAX_CONN_PER_ROUTE = 10;
     public static final int DEFAULT_MAX_CONN_TOTAL = 30;

@@ -196,8 +195,7 @@ public final class RestClientBuilder {
         //default timeouts are all infinite
         RequestConfig.Builder requestConfigBuilder = RequestConfig.custom()
                 .setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
-                .setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
-                .setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS);
+                .setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS);
         if (requestConfigCallback != null) {
             requestConfigBuilder = requestConfigCallback.customizeRequestConfig(requestConfigBuilder);
         }
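
Because the builder no longer sets a connection request timeout, the Apache client default (-1, i.e. no timeout) applies. A hedged sketch of how a caller could opt back into a bounded timeout through the existing callback, with 500ms chosen only because it was the old default:

import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;

public class ConnectionRequestTimeoutExample {
    static RestClient build() {
        return RestClient.builder(new HttpHost("localhost", 9200))
            .setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
                @Override
                public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
                    // Bound how long a request may wait for a connection from the pool.
                    return requestConfigBuilder.setConnectionRequestTimeout(500);
                }
            })
            .build();
    }
}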

View File

@@ -19,6 +19,8 @@
 package org.elasticsearch.client;

+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
@@ -27,9 +29,11 @@ import org.apache.http.HttpEntity;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.message.BasicHeader;
+import org.apache.http.nio.entity.NStringEntity;

 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;

 public class RequestTests extends RestClientTestCase {
@@ -99,12 +103,27 @@ public class RequestTests extends RestClientTestCase {
         final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);
         final HttpEntity entity =
                 randomBoolean() ? new StringEntity(randomAsciiLettersOfLengthBetween(1, 100), ContentType.TEXT_PLAIN) : null;
-        Request request = new Request(method, endpoint);

+        Request request = new Request(method, endpoint);
         request.setEntity(entity);
         assertEquals(entity, request.getEntity());
     }

+    public void testSetJsonEntity() throws IOException {
+        final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
+        final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);
+
+        Request request = new Request(method, endpoint);
+        assertNull(request.getEntity());
+
+        final String json = randomAsciiLettersOfLengthBetween(1, 100);
+        request.setJsonEntity(json);
+        assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue());
+        ByteArrayOutputStream os = new ByteArrayOutputStream();
+        request.getEntity().writeTo(os);
+        assertEquals(json, new String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset()));
+    }
+
     public void testSetHeaders() {
         final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
         final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);

View File

@@ -177,4 +177,24 @@ public class RestClientBuilderTests extends RestClientTestCase {
         }
     }

+    /**
+     * This test verifies that we don't change the default value for the connection request timeout as that causes problems.
+     * See https://github.com/elastic/elasticsearch/issues/24069
+     */
+    public void testDefaultConnectionRequestTimeout() throws IOException {
+        RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200));
+        builder.setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
+            @Override
+            public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
+                RequestConfig requestConfig = requestConfigBuilder.build();
+                assertEquals(RequestConfig.DEFAULT.getConnectionRequestTimeout(), requestConfig.getConnectionRequestTimeout());
+                //this way we get notified if the default ever changes
+                assertEquals(-1, requestConfig.getConnectionRequestTimeout());
+                return requestConfigBuilder;
+            }
+        });
+        try (RestClient restClient = builder.build()) {
+            assertNotNull(restClient);
+        }
+    }
 }

View File

@@ -32,9 +32,12 @@ import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.impl.client.BasicCredentialsProvider;
 import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
+import org.apache.http.nio.entity.NStringEntity;
 import org.apache.http.util.EntityUtils;
 import org.elasticsearch.mocksocket.MockHttpServer;
+import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;

 import java.io.IOException;
@@ -48,6 +51,9 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;

 import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes;
 import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
@@ -65,20 +71,20 @@
  */
 public class RestClientSingleHostIntegTests extends RestClientTestCase {

-    private static HttpServer httpServer;
-    private static RestClient restClient;
-    private static String pathPrefix;
-    private static Header[] defaultHeaders;
+    private HttpServer httpServer;
+    private RestClient restClient;
+    private String pathPrefix;
+    private Header[] defaultHeaders;

-    @BeforeClass
-    public static void startHttpServer() throws Exception {
+    @Before
+    public void startHttpServer() throws Exception {
         pathPrefix = randomBoolean() ? "/testPathPrefix/" + randomAsciiLettersOfLengthBetween(1, 5) : "";
         httpServer = createHttpServer();
         defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default");
         restClient = createRestClient(false, true);
     }

-    private static HttpServer createHttpServer() throws Exception {
+    private HttpServer createHttpServer() throws Exception {
         HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
         httpServer.start();
         //returns a different status code depending on the path
@@ -123,7 +129,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
         }
     }

-    private static RestClient createRestClient(final boolean useAuth, final boolean usePreemptiveAuth) {
+    private RestClient createRestClient(final boolean useAuth, final boolean usePreemptiveAuth) {
         // provide the username/password for every request
         final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider();
         credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials("user", "pass"));
@@ -151,14 +157,50 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
         return restClientBuilder.build();
     }

-    @AfterClass
-    public static void stopHttpServers() throws IOException {
+    @After
+    public void stopHttpServers() throws IOException {
         restClient.close();
         restClient = null;
         httpServer.stop(0);
         httpServer = null;
     }

+    /**
+     * Tests sending a bunch of async requests works well (e.g. no TimeoutException from the leased pool)
+     * See https://github.com/elastic/elasticsearch/issues/24069
+     */
+    public void testManyAsyncRequests() throws Exception {
+        int iters = randomIntBetween(500, 1000);
+        final CountDownLatch latch = new CountDownLatch(iters);
+        final List<Exception> exceptions = new CopyOnWriteArrayList<>();
+        for (int i = 0; i < iters; i++) {
+            Request request = new Request("PUT", "/200");
+            request.setEntity(new NStringEntity("{}", ContentType.APPLICATION_JSON));
+            restClient.performRequestAsync(request, new ResponseListener() {
+                @Override
+                public void onSuccess(Response response) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(Exception exception) {
+                    exceptions.add(exception);
+                    latch.countDown();
+                }
+            });
+        }
+
+        assertTrue("timeout waiting for requests to be sent", latch.await(10, TimeUnit.SECONDS));
+        if (exceptions.isEmpty() == false) {
+            AssertionError error = new AssertionError("expected no failures but got some. see suppressed for first 10 of ["
+                    + exceptions.size() + "] failures");
+            for (Exception exception : exceptions.subList(0, Math.min(10, exceptions.size()))) {
+                error.addSuppressed(exception);
+            }
+            throw error;
+        }
+    }
+
     /**
      * End to end test for headers. We test it explicitly against a real http client as there are different ways
      * to set/add headers to the {@link org.apache.http.client.HttpClient}.

View File

@@ -168,10 +168,13 @@ public class RestClientDocumentation {
             request.addParameter("pretty", "true");
             //end::rest-client-parameters
             //tag::rest-client-body
-            request.setEntity(new StringEntity(
+            request.setEntity(new NStringEntity(
                     "{\"json\":\"text\"}",
                     ContentType.APPLICATION_JSON));
             //end::rest-client-body
+            //tag::rest-client-body-shorter
+            request.setJsonEntity("{\"json\":\"text\"}");
+            //end::rest-client-body-shorter
             //tag::rest-client-headers
             request.setHeaders(
                     new BasicHeader("Accept", "text/plain"),

View File

@@ -312,6 +312,22 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {

 task run(type: RunTask) {
   distribution = System.getProperty('run.distribution', 'zip')
+  if (distribution == 'zip') {
+    String licenseType = System.getProperty("license_type", "basic")
+    if (licenseType == 'trial') {
+      setting 'xpack.ml.enabled', 'true'
+      setting 'xpack.graph.enabled', 'true'
+      setting 'xpack.watcher.enabled', 'true'
+      setting 'xpack.license.self_generated.type', 'trial'
+    } else if (licenseType != 'basic') {
+      throw new IllegalArgumentException("Unsupported self-generated license type: [" + licenseType + "]. Must be [basic] or [trial].")
+    }
+    setting 'xpack.security.enabled', 'true'
+    setting 'xpack.monitoring.enabled', 'true'
+    setting 'xpack.sql.enabled', 'true'
+    setting 'xpack.rollup.enabled', 'true'
+    keystoreSetting 'bootstrap.password', 'password'
+  }
 }

 /**

View File

@@ -104,6 +104,8 @@ ones that the user is authorized to access in case field level security is enabled
 [float]
 === Bug Fixes

+Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310])
+
 Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365])

 Fixed prerelease version of elasticsearch in the `deb` package to sort before GA versions
@@ -163,6 +165,10 @@ analysis module. ({pull}30397[#30397])
 Added new "Request" object flavored request methods in the RestClient. Prefer
 these instead of the multi-argument versions. ({pull}29623[#29623])

+Added `setJsonEntity` to `Request` object so it is marginally easier to send JSON. ({pull}30447[#30447])
+
+Watcher HTTP client used in watches now allows more parallel connections to the
+same endpoint and evicts long running connections. ({pull}30130[#30130])
+
 The cluster state listener to decide if watcher should be
 stopped/started/paused now runs far less code in an executor but is more
 synchronous and predictable. Also the trigger engine thread is only started on
@@ -171,9 +177,14 @@ started or stopped. ({pull}30118[#30118])

 Added put index template API to the high level rest client ({pull}30400[#30400])

+Add ability to filter coordinating-only nodes when interacting with cluster
+APIs. ({pull}30313[#30313])
+
 [float]
 === Bug Fixes

+Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310])
+
 Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365])

 Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216])

View File

@@ -68,6 +68,23 @@ for its modifiers:
   but rather than the setup defined in `docs/build.gradle` the setup is defined
   right in the documentation file.

+In addition to the standard CONSOLE syntax these snippets can contain blocks
+of yaml surrounded by markers like this:
+
+```
+startyaml
+  - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}
+endyaml
+```
+
+This allows slightly more expressive testing of the snippets. Since that syntax
+is not supported by CONSOLE the usual way to incorporate it is with a
+`// TEST[s//]` marker like this:
+
+```
+// TEST[s/\n$/\nstartyaml\n  - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}\nendyaml\n/]
+```
+
 Any place you can use json you can use elements like `$body.path.to.thing`
 which is replaced on the fly with the contents of the thing at `path.to.thing`
 in the last response.

View File

@@ -60,6 +60,8 @@ buildRestTests.docs = fileTree(projectDir) {
   exclude 'build.gradle'
   // That is where the snippets go, not where they come from!
   exclude 'build'
+  // Just syntax examples
+  exclude 'README.asciidoc'
 }

 Closure setupTwitter = { String name, int count ->

View File

@@ -0,0 +1,86 @@
[[java-rest-high-snapshot-get-repository]]
=== Snapshot Get Repository API

The Snapshot Get Repository API retrieves information about registered repositories.

[[java-rest-high-snapshot-get-repository-request]]
==== Snapshot Get Repository Request

A `GetRepositoriesRequest`:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[get-repository-request]
--------------------------------------------------

==== Optional Arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[get-repository-request-repositories]
--------------------------------------------------
<1> Sets the repositories to retrieve

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[get-repository-request-local]
--------------------------------------------------
<1> The `local` flag (defaults to `false`) controls whether the repositories need
to be looked up in the local cluster state or in the cluster state held by
the elected master node

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[get-repository-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`

[[java-rest-high-snapshot-get-repository-sync]]
==== Synchronous Execution

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[get-repository-execute]
--------------------------------------------------

[[java-rest-high-snapshot-get-repository-async]]
==== Asynchronous Execution

The asynchronous execution of a snapshot get repository request requires both the
`GetRepositoriesRequest` instance and an `ActionListener` instance to be
passed to the asynchronous method:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[get-repository-execute-async]
--------------------------------------------------
<1> The `GetRepositoriesRequest` to execute and the `ActionListener`
to use when the execution completes

The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.

A typical listener for `GetRepositoriesResponse` looks like:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[get-repository-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of a failure. The raised exception is provided as an argument

[[java-rest-high-snapshot-get-repository-response]]
==== Snapshot Get Repository Response

The returned `GetRepositoriesResponse` exposes information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[get-repository-response]
--------------------------------------------------

View File

@@ -104,3 +104,11 @@ The Java High Level REST Client supports the following Cluster APIs:
 * <<java-rest-high-cluster-put-settings>>

 include::cluster/put_settings.asciidoc[]
+
+== Snapshot APIs
+
+The Java High Level REST Client supports the following Snapshot APIs:
+
+* <<java-rest-high-snapshot-get-repository>>
+
+include::snapshot/get_repository.asciidoc[]

View File

@@ -263,6 +263,14 @@ IMPORTANT: The `ContentType` specified for the `HttpEntity` is important
 because it will be used to set the `Content-Type` header so that Elasticsearch
 can properly parse the content.

+You can also set it to a `String` which will default to
+a `ContentType` of `application/json`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body-shorter]
+--------------------------------------------------
+
 And you can set a list of headers to send with the request:

 ["source","java",subs="attributes,callouts,macros"]

View File

@@ -97,10 +97,11 @@ PUT /arabic_example
         }
       },
       "analyzer": {
-        "arabic": {
+        "rebuilt_arabic": {
           "tokenizer": "standard",
           "filter": [
             "lowercase",
+            "decimal_digit",
             "arabic_stop",
             "arabic_normalization",
             "arabic_keywords",
@@ -113,6 +114,8 @@ PUT /arabic_example
 }
 ----------------------------------------------------
 // CONSOLE
+// TEST[s/"arabic_keywords",//]
+// TEST[s/\n$/\nstartyaml\n  - compare_analyzers: {index: arabic_example, first: arabic, second: rebuilt_arabic}\nendyaml\n/]
 <1> The default stopwords can be overridden with the `stopwords`
     or `stopwords_path` parameters.
 <2> This filter should be removed unless there are words which should
@@ -144,7 +147,7 @@ PUT /armenian_example
         }
       },
       "analyzer": {
-        "armenian": {
+        "rebuilt_armenian": {
           "tokenizer": "standard",
           "filter": [
             "lowercase",
@@ -159,6 +162,8 @@ PUT /armenian_example
 }
 ----------------------------------------------------
 // CONSOLE
+// TEST[s/"armenian_keywords",//]
+// TEST[s/\n$/\nstartyaml\n  - compare_analyzers: {index: armenian_example, first: armenian, second: rebuilt_armenian}\nendyaml\n/]
 <1> The default stopwords can be overridden with the `stopwords`
     or `stopwords_path` parameters.
 <2> This filter should be removed unless there are words which should
@@ -190,7 +195,7 @@ PUT /basque_example
         }
       },
       "analyzer": {
-        "basque": {
+        "rebuilt_basque": {
           "tokenizer": "standard",
           "filter": [
             "lowercase",
@@ -205,6 +210,8 @@ PUT /basque_example
 }
 ----------------------------------------------------
 // CONSOLE
+// TEST[s/"basque_keywords",//]
+// TEST[s/\n$/\nstartyaml\n  - compare_analyzers: {index: basque_example, first: basque, second: rebuilt_basque}\nendyaml\n/]
 <1> The default stopwords can be overridden with the `stopwords`
     or `stopwords_path` parameters.
 <2> This filter should be removed unless there are words which should
@@ -236,14 +243,15 @@ PUT /bengali_example
         }
       },
       "analyzer": {
-        "bengali": {
+        "rebuilt_bengali": {
           "tokenizer": "standard",
           "filter": [
             "lowercase",
+            "decimal_digit",
+            "bengali_keywords",
             "indic_normalization",
             "bengali_normalization",
             "bengali_stop",
-            "bengali_keywords",
             "bengali_stemmer"
           ]
         }
@@ -253,6 +261,8 @@ PUT /bengali_example
 }
 ----------------------------------------------------
 // CONSOLE
+// TEST[s/"bengali_keywords",//]
+// TEST[s/\n$/\nstartyaml\n  - compare_analyzers: {index: bengali_example, first: bengali, second: rebuilt_bengali}\nendyaml\n/]
 <1> The default stopwords can be overridden with the `stopwords`
     or `stopwords_path` parameters.
 <2> This filter should be removed unless there are words which should
@@ -284,7 +294,7 @@ PUT /brazilian_example
         }
       },
       "analyzer": {
-        "brazilian": {
+        "rebuilt_brazilian": {
           "tokenizer": "standard",
           "filter": [
             "lowercase",
@@ -299,6 +309,8 @@ PUT /brazilian_example
 }
 ----------------------------------------------------
 // CONSOLE
+// TEST[s/"brazilian_keywords",//]
+// TEST[s/\n$/\nstartyaml\n  - compare_analyzers: {index: brazilian_example, first: brazilian, second: rebuilt_brazilian}\nendyaml\n/]
 <1> The default stopwords can be overridden with the `stopwords`
     or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -330,7 +342,7 @@ PUT /bulgarian_example
} }
}, },
"analyzer": { "analyzer": {
"bulgarian": { "rebuilt_bulgarian": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -345,6 +357,8 @@ PUT /bulgarian_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"bulgarian_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bulgarian_example, first: bulgarian, second: rebuilt_bulgarian}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -380,7 +394,7 @@ PUT /catalan_example
} }
}, },
"analyzer": { "analyzer": {
"catalan": { "rebuilt_catalan": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"catalan_elision", "catalan_elision",
@ -396,6 +410,8 @@ PUT /catalan_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"catalan_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: catalan_example, first: catalan, second: rebuilt_catalan}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -415,11 +431,17 @@ PUT /cjk_example
"filter": { "filter": {
"english_stop": { "english_stop": {
"type": "stop", "type": "stop",
"stopwords": "_english_" <1> "stopwords": [ <1>
"a", "and", "are", "as", "at", "be", "but", "by", "for",
"if", "in", "into", "is", "it", "no", "not", "of", "on",
"or", "s", "such", "t", "that", "the", "their", "then",
"there", "these", "they", "this", "to", "was", "will",
"with", "www"
]
} }
}, },
"analyzer": { "analyzer": {
"cjk": { "rebuilt_cjk": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"cjk_width", "cjk_width",
@ -434,8 +456,12 @@ PUT /cjk_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"cjk_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: cjk_example, first: cjk, second: rebuilt_cjk}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters. The default stop words are
*almost* the same as the `_english_` set, but not exactly
the same.
[[czech-analyzer]] [[czech-analyzer]]
===== `czech` analyzer ===== `czech` analyzer
@ -463,7 +489,7 @@ PUT /czech_example
} }
}, },
"analyzer": { "analyzer": {
"czech": { "rebuilt_czech": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -478,6 +504,8 @@ PUT /czech_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"czech_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: czech_example, first: czech, second: rebuilt_czech}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -509,7 +537,7 @@ PUT /danish_example
} }
}, },
"analyzer": { "analyzer": {
"danish": { "rebuilt_danish": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -524,6 +552,8 @@ PUT /danish_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"danish_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: danish_example, first: danish, second: rebuilt_danish}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -564,7 +594,7 @@ PUT /dutch_example
} }
}, },
"analyzer": { "analyzer": {
"dutch": { "rebuilt_dutch": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -580,6 +610,8 @@ PUT /dutch_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"dutch_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: dutch_example, first: dutch, second: rebuilt_dutch}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -615,7 +647,7 @@ PUT /english_example
} }
}, },
"analyzer": { "analyzer": {
"english": { "rebuilt_english": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"english_possessive_stemmer", "english_possessive_stemmer",
@ -631,6 +663,8 @@ PUT /english_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"english_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: english_example, first: english, second: rebuilt_english}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -662,7 +696,7 @@ PUT /finnish_example
} }
}, },
"analyzer": { "analyzer": {
"finnish": { "rebuilt_finnish": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -677,6 +711,8 @@ PUT /finnish_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"finnish_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: finnish_example, first: finnish, second: rebuilt_finnish}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -717,7 +753,7 @@ PUT /french_example
} }
}, },
"analyzer": { "analyzer": {
"french": { "rebuilt_french": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"french_elision", "french_elision",
@ -733,6 +769,8 @@ PUT /french_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"french_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: french_example, first: french, second: rebuilt_french}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -764,7 +802,7 @@ PUT /galician_example
} }
}, },
"analyzer": { "analyzer": {
"galician": { "rebuilt_galician": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -779,6 +817,8 @@ PUT /galician_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"galician_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: galician_example, first: galician, second: rebuilt_galician}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -810,7 +850,7 @@ PUT /german_example
} }
}, },
"analyzer": { "analyzer": {
"german": { "rebuilt_german": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -826,6 +866,8 @@ PUT /german_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"german_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: german_example, first: german, second: rebuilt_german}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -861,7 +903,7 @@ PUT /greek_example
} }
}, },
"analyzer": { "analyzer": {
"greek": { "rebuilt_greek": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"greek_lowercase", "greek_lowercase",
@ -876,6 +918,8 @@ PUT /greek_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"greek_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: greek_example, first: greek, second: rebuilt_greek}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -907,14 +951,15 @@ PUT /hindi_example
} }
}, },
"analyzer": { "analyzer": {
"hindi": { "rebuilt_hindi": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
"decimal_digit",
"hindi_keywords",
"indic_normalization", "indic_normalization",
"hindi_normalization", "hindi_normalization",
"hindi_stop", "hindi_stop",
"hindi_keywords",
"hindi_stemmer" "hindi_stemmer"
] ]
} }
@ -924,6 +969,8 @@ PUT /hindi_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"hindi_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hindi_example, first: hindi, second: rebuilt_hindi}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -955,7 +1002,7 @@ PUT /hungarian_example
} }
}, },
"analyzer": { "analyzer": {
"hungarian": { "rebuilt_hungarian": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -970,6 +1017,8 @@ PUT /hungarian_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"hungarian_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hungarian_example, first: hungarian, second: rebuilt_hungarian}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1002,7 +1051,7 @@ PUT /indonesian_example
} }
}, },
"analyzer": { "analyzer": {
"indonesian": { "rebuilt_indonesian": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -1017,6 +1066,8 @@ PUT /indonesian_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"indonesian_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: indonesian_example, first: indonesian, second: rebuilt_indonesian}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1034,9 +1085,15 @@ PUT /irish_example
"settings": { "settings": {
"analysis": { "analysis": {
"filter": { "filter": {
"irish_hyphenation": {
"type": "stop",
"stopwords": [ "h", "n", "t" ],
"ignore_case": true
},
"irish_elision": { "irish_elision": {
"type": "elision", "type": "elision",
"articles": [ "h", "n", "t" ] "articles": [ "d", "m", "b" ],
"articles_case": true
}, },
"irish_stop": { "irish_stop": {
"type": "stop", "type": "stop",
@ -1056,12 +1113,13 @@ PUT /irish_example
} }
}, },
"analyzer": { "analyzer": {
"irish": { "rebuilt_irish": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"irish_stop", "irish_hyphenation",
"irish_elision", "irish_elision",
"irish_lowercase", "irish_lowercase",
"irish_stop",
"irish_keywords", "irish_keywords",
"irish_stemmer" "irish_stemmer"
] ]
@ -1072,6 +1130,8 @@ PUT /irish_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"irish_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: irish_example, first: irish, second: rebuilt_irish}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1112,7 +1172,7 @@ PUT /italian_example
} }
}, },
"analyzer": { "analyzer": {
"italian": { "rebuilt_italian": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"italian_elision", "italian_elision",
@ -1128,6 +1188,8 @@ PUT /italian_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"italian_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: italian_example, first: italian, second: rebuilt_italian}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1159,7 +1221,7 @@ PUT /latvian_example
} }
}, },
"analyzer": { "analyzer": {
"latvian": { "rebuilt_latvian": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -1174,6 +1236,8 @@ PUT /latvian_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"latvian_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: latvian_example, first: latvian, second: rebuilt_latvian}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1205,7 +1269,7 @@ PUT /lithuanian_example
} }
}, },
"analyzer": { "analyzer": {
"lithuanian": { "rebuilt_lithuanian": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -1220,6 +1284,8 @@ PUT /lithuanian_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"lithuanian_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: lithuanian_example, first: lithuanian, second: rebuilt_lithuanian}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1251,7 +1317,7 @@ PUT /norwegian_example
} }
}, },
"analyzer": { "analyzer": {
"norwegian": { "rebuilt_norwegian": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -1266,6 +1332,8 @@ PUT /norwegian_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"norwegian_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: norwegian_example, first: norwegian, second: rebuilt_norwegian}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1295,11 +1363,12 @@ PUT /persian_example
} }
}, },
"analyzer": { "analyzer": {
"persian": { "rebuilt_persian": {
"tokenizer": "standard", "tokenizer": "standard",
"char_filter": [ "zero_width_spaces" ], "char_filter": [ "zero_width_spaces" ],
"filter": [ "filter": [
"lowercase", "lowercase",
"decimal_digit",
"arabic_normalization", "arabic_normalization",
"persian_normalization", "persian_normalization",
"persian_stop" "persian_stop"
@ -1311,6 +1380,7 @@ PUT /persian_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: persian_example, first: persian, second: rebuilt_persian}\nendyaml\n/]
<1> Replaces zero-width non-joiners with an ASCII space. <1> Replaces zero-width non-joiners with an ASCII space.
<2> The default stopwords can be overridden with the `stopwords` <2> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
@ -1341,7 +1411,7 @@ PUT /portuguese_example
} }
}, },
"analyzer": { "analyzer": {
"portuguese": { "rebuilt_portuguese": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -1356,6 +1426,8 @@ PUT /portuguese_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"portuguese_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: portuguese_example, first: portuguese, second: rebuilt_portuguese}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1387,7 +1459,7 @@ PUT /romanian_example
} }
}, },
"analyzer": { "analyzer": {
"romanian": { "rebuilt_romanian": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -1402,6 +1474,8 @@ PUT /romanian_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"romanian_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: romanian_example, first: romanian, second: rebuilt_romanian}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1434,7 +1508,7 @@ PUT /russian_example
} }
}, },
"analyzer": { "analyzer": {
"russian": { "rebuilt_russian": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -1449,6 +1523,8 @@ PUT /russian_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"russian_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: russian_example, first: russian, second: rebuilt_russian}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1480,11 +1556,12 @@ PUT /sorani_example
} }
}, },
"analyzer": { "analyzer": {
"sorani": { "rebuilt_sorani": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"sorani_normalization", "sorani_normalization",
"lowercase", "lowercase",
"decimal_digit",
"sorani_stop", "sorani_stop",
"sorani_keywords", "sorani_keywords",
"sorani_stemmer" "sorani_stemmer"
@ -1496,6 +1573,8 @@ PUT /sorani_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"sorani_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: sorani_example, first: sorani, second: rebuilt_sorani}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1527,7 +1606,7 @@ PUT /spanish_example
} }
}, },
"analyzer": { "analyzer": {
"spanish": { "rebuilt_spanish": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -1542,6 +1621,8 @@ PUT /spanish_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"spanish_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: spanish_example, first: spanish, second: rebuilt_spanish}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1573,7 +1654,7 @@ PUT /swedish_example
} }
}, },
"analyzer": { "analyzer": {
"swedish": { "rebuilt_swedish": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"lowercase", "lowercase",
@ -1588,6 +1669,8 @@ PUT /swedish_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"swedish_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: swedish_example, first: swedish, second: rebuilt_swedish}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1623,7 +1706,7 @@ PUT /turkish_example
} }
}, },
"analyzer": { "analyzer": {
"turkish": { "rebuilt_turkish": {
"tokenizer": "standard", "tokenizer": "standard",
"filter": [ "filter": [
"apostrophe", "apostrophe",
@ -1639,6 +1722,8 @@ PUT /turkish_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"turkish_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: turkish_example, first: turkish, second: rebuilt_turkish}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should <2> This filter should be removed unless there are words which should
@ -1662,10 +1747,11 @@ PUT /thai_example
} }
}, },
"analyzer": { "analyzer": {
"thai": { "rebuilt_thai": {
"tokenizer": "thai", "tokenizer": "thai",
"filter": [ "filter": [
"lowercase", "lowercase",
"decimal_digit",
"thai_stop" "thai_stop"
] ]
} }
@ -1675,5 +1761,7 @@ PUT /thai_example
} }
---------------------------------------------------- ----------------------------------------------------
// CONSOLE // CONSOLE
// TEST[s/"thai_keywords",//]
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}\nendyaml\n/]
<1> The default stopwords can be overridden with the `stopwords` <1> The default stopwords can be overridden with the `stopwords`
or `stopwords_path` parameters. or `stopwords_path` parameters.

@ -10,6 +10,11 @@ This call will block until the merge is complete. If the http connection is
lost, the request will continue in the background, and any new requests will lost, the request will continue in the background, and any new requests will
block until the previous force merge is complete. block until the previous force merge is complete.
WARNING: Force merge should only be called against *read-only indices*. Running
force merge against a read-write index can produce very large segments
(>5GB per segment), and the merge policy will never consider such a segment for
merging again until it mostly consists of deleted docs, leaving very large segments in the shards.
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------
POST /twitter/_forcemerge POST /twitter/_forcemerge

@ -20,18 +20,39 @@
package org.elasticsearch.smoketest; package org.elasticsearch.smoketest;
import org.apache.http.HttpHost; import org.apache.http.HttpHost;
import org.apache.lucene.util.BytesRef;
import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.test.rest.yaml.ClientYamlDocsTestClient; import org.elasticsearch.test.rest.yaml.ClientYamlDocsTestClient;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ClientYamlTestClient; import org.elasticsearch.test.rest.yaml.ClientYamlTestClient;
import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext;
import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec;
import org.elasticsearch.test.rest.yaml.section.ExecutableSection;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
@ -41,7 +62,12 @@ public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
@ParametersFactory @ParametersFactory
public static Iterable<Object[]> parameters() throws Exception { public static Iterable<Object[]> parameters() throws Exception {
return ESClientYamlSuiteTestCase.createParameters(); List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ExecutableSection.DEFAULT_EXECUTABLE_CONTEXTS.size() + 1);
entries.addAll(ExecutableSection.DEFAULT_EXECUTABLE_CONTEXTS);
entries.add(new NamedXContentRegistry.Entry(ExecutableSection.class,
new ParseField("compare_analyzers"), CompareAnalyzers::parse));
NamedXContentRegistry executeableSectionRegistry = new NamedXContentRegistry(entries);
return ESClientYamlSuiteTestCase.createParameters(executeableSectionRegistry);
} }
@Override @Override
@ -64,5 +90,117 @@ public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
List<HttpHost> hosts, Version esVersion) throws IOException { List<HttpHost> hosts, Version esVersion) throws IOException {
return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion); return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion);
} }
/**
* Compares the results of running two analyzers against many random
* strings. The goal is to figure out if two analyzers are "the same" by
* comparing their results. This is far from perfect but should be fairly
* accurate, especially for gross things like missing {@code decimal_digit}
* token filters, and should be fairly fast because it compares a fairly
* small number of tokens.
*/
private static class CompareAnalyzers implements ExecutableSection {
private static ConstructingObjectParser<CompareAnalyzers, XContentLocation> PARSER =
new ConstructingObjectParser<>("test_analyzer", false, (a, location) -> {
String index = (String) a[0];
String first = (String) a[1];
String second = (String) a[2];
return new CompareAnalyzers(location, index, first, second);
});
static {
PARSER.declareString(constructorArg(), new ParseField("index"));
PARSER.declareString(constructorArg(), new ParseField("first"));
PARSER.declareString(constructorArg(), new ParseField("second"));
}
private static CompareAnalyzers parse(XContentParser parser) throws IOException {
XContentLocation location = parser.getTokenLocation();
CompareAnalyzers section = PARSER.parse(parser, location);
assert parser.currentToken() == Token.END_OBJECT : "End of object required";
parser.nextToken(); // throw out the END_OBJECT to conform with other ExecutableSections
return section;
} }
private final XContentLocation location;
private final String index;
private final String first;
private final String second;
private CompareAnalyzers(XContentLocation location, String index, String first, String second) {
this.location = location;
this.index = index;
this.first = first;
this.second = second;
}
@Override
public XContentLocation getLocation() {
return location;
}
@Override
public void execute(ClientYamlTestExecutionContext executionContext) throws IOException {
int size = 100;
int maxLength = 15;
List<String> testText = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
/**
* Build a string with a few unicode sequences separated by
* spaces. The unicode sequences aren't going to be of the same
* code page which is a shame because it makes the entire
* string less realistic. But this still provides a fairly
* nice string to compare.
*/
int spaces = between(0, 5);
StringBuilder b = new StringBuilder((spaces + 1) * maxLength);
b.append(randomRealisticUnicodeOfCodepointLengthBetween(1, maxLength));
for (int t = 0; t < spaces; t++) {
b.append(' ');
b.append(randomRealisticUnicodeOfCodepointLengthBetween(1, maxLength));
}
testText.add(b.toString()
// Don't look up stashed values
.replace("$", "\\$"));
}
Map<String, Object> body = new HashMap<>(2);
body.put("analyzer", first);
body.put("text", testText);
ClientYamlTestResponse response = executionContext.callApi("indices.analyze", singletonMap("index", index),
singletonList(body), emptyMap());
Iterator<?> firstTokens = ((List<?>) response.evaluate("tokens")).iterator();
body.put("analyzer", second);
response = executionContext.callApi("indices.analyze", singletonMap("index", index),
singletonList(body), emptyMap());
Iterator<?> secondTokens = ((List<?>) response.evaluate("tokens")).iterator();
Object previousFirst = null;
Object previousSecond = null;
while (firstTokens.hasNext()) {
if (false == secondTokens.hasNext()) {
fail(second + " has fewer tokens than " + first + ". "
+ first + " has [" + firstTokens.next() + "] but " + second + " is out of tokens. "
+ first + "'s last token was [" + previousFirst + "] and "
+ second + "'s last token was [" + previousSecond + "]");
}
Map<?, ?> firstToken = (Map<?, ?>) firstTokens.next();
Map<?, ?> secondToken = (Map<?, ?>) secondTokens.next();
String firstText = (String) firstToken.get("token");
String secondText = (String) secondToken.get("token");
// Check the text and produce an error message with the utf8 sequence if they don't match.
if (false == secondText.equals(firstText)) {
fail("text differs: " + first + " was [" + firstText + "] but " + second + " was [" + secondText
+ "]. In utf8 those are\n" + new BytesRef(firstText) + " and\n" + new BytesRef(secondText));
}
// Now check the whole map just in case the text matches but something else differs
assertEquals(firstToken, secondToken);
previousFirst = firstToken;
previousSecond = secondToken;
}
if (secondTokens.hasNext()) {
fail(second + " has more tokens than " + first + ". "
+ second + " has [" + secondTokens.next() + "] but " + first + " is out of tokens. "
+ first + "'s last token was [" + previousFirst + "] and "
+ second + "'s last token was [" + previousSecond + "]");
}
}
}
}

@ -1,2 +1,2 @@
org.gradle.daemon=false org.gradle.daemon=false
org.gradle.jvmargs=-Xmx1536m org.gradle.jvmargs=-Xmx1792m

@ -1 +0,0 @@
63ff4af3504881744695f6239fcb3e9c0e3240b1

@ -0,0 +1 @@
f72ad4b6474c2d59b0eed0ca84eddd1f99d29129

@ -30,11 +30,17 @@ esplugin {
} }
integTestCluster { integTestCluster {
// Modules whose integration is explicitly tested in integration tests
module project(':modules:parent-join')
module project(':modules:lang-painless')
// Whitelist reindexing from the local node so we can test reindex-from-remote. // Whitelist reindexing from the local node so we can test reindex-from-remote.
setting 'reindex.remote.whitelist', '127.0.0.1:*' setting 'reindex.remote.whitelist', '127.0.0.1:*'
} }
run { run {
// Modules whose integration is explicitly tested in integration tests
module project(':modules:parent-join')
module project(':modules:lang-painless')
// Whitelist reindexing from the local node so we can test reindex-from-remote. // Whitelist reindexing from the local node so we can test reindex-from-remote.
setting 'reindex.remote.whitelist', '127.0.0.1:*' setting 'reindex.remote.whitelist', '127.0.0.1:*'
} }

@ -32,7 +32,6 @@ import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.Retry; import org.elasticsearch.action.bulk.Retry;
import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure;
import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.ParentTaskAssigningClient; import org.elasticsearch.client.ParentTaskAssigningClient;
@ -41,7 +40,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.index.VersionType; import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.IndexFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper;
@ -49,6 +47,7 @@ import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper;
import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure;
import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService;
@ -75,8 +74,8 @@ import static java.lang.Math.min;
import static java.util.Collections.emptyList; import static java.util.Collections.emptyList;
import static java.util.Collections.unmodifiableList; import static java.util.Collections.unmodifiableList;
import static org.elasticsearch.action.bulk.BackoffPolicy.exponentialBackoff; import static org.elasticsearch.action.bulk.BackoffPolicy.exponentialBackoff;
import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES;
import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.SIZE_ALL_MATCHES;
import static org.elasticsearch.rest.RestStatus.CONFLICT; import static org.elasticsearch.rest.RestStatus.CONFLICT;
import static org.elasticsearch.search.sort.SortBuilders.fieldSort; import static org.elasticsearch.search.sort.SortBuilders.fieldSort;
@ -139,7 +138,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
this.mainRequest = mainRequest; this.mainRequest = mainRequest;
this.listener = listener; this.listener = listener;
BackoffPolicy backoffPolicy = buildBackoffPolicy(); BackoffPolicy backoffPolicy = buildBackoffPolicy();
bulkRetry = new Retry(EsRejectedExecutionException.class, BackoffPolicy.wrap(backoffPolicy, worker::countBulkRetry), threadPool); bulkRetry = new Retry(BackoffPolicy.wrap(backoffPolicy, worker::countBulkRetry), threadPool);
scrollSource = buildScrollableResultSource(backoffPolicy); scrollSource = buildScrollableResultSource(backoffPolicy);
scriptApplier = Objects.requireNonNull(buildScriptApplier(), "script applier must not be null"); scriptApplier = Objects.requireNonNull(buildScriptApplier(), "script applier must not be null");
/* /*

View File

@ -19,10 +19,8 @@
package org.elasticsearch.index.reindex.remote; package org.elasticsearch.index.reindex.remote;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType; import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity; import org.apache.http.nio.entity.NStringEntity;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequest;
@ -151,8 +149,7 @@ final class RemoteRequestBuilders {
} }
entity.endObject(); entity.endObject();
BytesRef bytes = BytesReference.bytes(entity).toBytesRef(); request.setJsonEntity(Strings.toString(entity));
request.setEntity(new ByteArrayEntity(bytes.bytes, bytes.offset, bytes.length, ContentType.APPLICATION_JSON));
} catch (IOException e) { } catch (IOException e) {
throw new ElasticsearchException("unexpected error building entity", e); throw new ElasticsearchException("unexpected error building entity", e);
} }
@ -199,7 +196,7 @@ final class RemoteRequestBuilders {
if (remoteVersion.before(Version.fromId(2000099))) { if (remoteVersion.before(Version.fromId(2000099))) {
// Versions before 2.0.0 extract the plain scroll_id from the body // Versions before 2.0.0 extract the plain scroll_id from the body
request.setEntity(new StringEntity(scroll, ContentType.TEXT_PLAIN)); request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN));
return request; return request;
} }
@ -207,7 +204,7 @@ final class RemoteRequestBuilders {
entity.startObject() entity.startObject()
.field("scroll_id", scroll) .field("scroll_id", scroll)
.endObject(); .endObject();
request.setEntity(new StringEntity(Strings.toString(entity), ContentType.APPLICATION_JSON)); request.setJsonEntity(Strings.toString(entity));
} catch (IOException e) { } catch (IOException e) {
throw new ElasticsearchException("failed to build scroll entity", e); throw new ElasticsearchException("failed to build scroll entity", e);
} }
@ -219,14 +216,14 @@ final class RemoteRequestBuilders {
if (remoteVersion.before(Version.fromId(2000099))) { if (remoteVersion.before(Version.fromId(2000099))) {
// Versions before 2.0.0 extract the plain scroll_id from the body // Versions before 2.0.0 extract the plain scroll_id from the body
request.setEntity(new StringEntity(scroll, ContentType.TEXT_PLAIN)); request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN));
return request; return request;
} }
try (XContentBuilder entity = JsonXContent.contentBuilder()) { try (XContentBuilder entity = JsonXContent.contentBuilder()) {
entity.startObject() entity.startObject()
.array("scroll_id", scroll) .array("scroll_id", scroll)
.endObject(); .endObject();
request.setEntity(new StringEntity(Strings.toString(entity), ContentType.APPLICATION_JSON)); request.setJsonEntity(Strings.toString(entity));
} catch (IOException e) { } catch (IOException e) {
throw new ElasticsearchException("failed to build clear scroll entity", e); throw new ElasticsearchException("failed to build clear scroll entity", e);
} }

@ -186,7 +186,7 @@ public class RetryTests extends ESIntegTestCase {
bulk.add(client().prepareIndex("source", "test").setSource("foo", "bar " + i)); bulk.add(client().prepareIndex("source", "test").setSource("foo", "bar " + i));
} }
Retry retry = new Retry(EsRejectedExecutionException.class, BackoffPolicy.exponentialBackoff(), client().threadPool()); Retry retry = new Retry(BackoffPolicy.exponentialBackoff(), client().threadPool());
BulkResponse initialBulkResponse = retry.withBackoff(client()::bulk, bulk.request(), client().settings()).actionGet(); BulkResponse initialBulkResponse = retry.withBackoff(client()::bulk, bulk.request(), client().settings()).actionGet();
assertFalse(initialBulkResponse.buildFailureMessage(), initialBulkResponse.hasFailures()); assertFalse(initialBulkResponse.buildFailureMessage(), initialBulkResponse.hasFailures());
client().admin().indices().prepareRefresh("source").get(); client().admin().indices().prepareRefresh("source").get();

@ -1,5 +1,5 @@
--- ---
"Response format search failures": "Response format for search failures":
- do: - do:
index: index:
index: source index: source

@ -433,3 +433,28 @@
- match: { docs.2._index: index2 } - match: { docs.2._index: index2 }
- match: { docs.2._type: type2 } - match: { docs.2._type: type2 }
- match: { docs.2._id: fr_789 } - match: { docs.2._id: fr_789 }
---
"Totally broken scripts report the error properly":
- do:
index:
index: twitter
type: tweet
id: 1
body: { "user": "kimchy" }
- do:
indices.refresh: {}
- do:
catch: request
reindex:
refresh: true
body:
source:
index: twitter
dest:
index: new_twitter
script:
lang: painless
source: syntax errors are fun!
- match: {error.reason: 'compile error'}

@ -1,5 +1,5 @@
--- ---
"Response format search failures": "Response format for search failures":
- do: - do:
index: index:
index: source index: source

@ -421,3 +421,25 @@
term: term:
level: 11 level: 11
- match: { hits.total: 0 } - match: { hits.total: 0 }
---
"Totally broken scripts report the error properly":
- do:
index:
index: twitter
type: tweet
id: 1
body: { "user": "kimchy" }
- do:
indices.refresh: {}
- do:
catch: request
update_by_query:
index: twitter
refresh: true
body:
script:
lang: painless
source: syntax errors are fun!
- match: {error.reason: 'compile error'}

@ -1 +0,0 @@
5f3c053ef858c58c74a687a40f5451d19b69850b

@ -0,0 +1 @@
b4e19c53f29fa9b40bd7ad12ff598e3f08d507a3

@ -1 +0,0 @@
a6e72085f7c2ade43ec0e5f52c227e6f715666ad

@ -0,0 +1 @@
23dd8cb3834f3641d9b3e8bc3d38281389a597bc

@ -1 +0,0 @@
a7daed3dc3a67674862002f315cd9193944de783

@ -0,0 +1 @@
e8119a17448a6f5512ded0bd2a6faa7fc8e70890

@ -1 +0,0 @@
25c93466d0a2c41df0cf98de77d632f3f02fa98d

@ -0,0 +1 @@
336d9ac698066b8cf8a448f193e4a29ef163baa8

@ -1 +0,0 @@
4688aaa48607ac26f6bf2567052019ab3fb2ff5e

@ -0,0 +1 @@
e1e77951a83fc6a9deab884773314992fefa14f3

@ -1 +0,0 @@
ad71de632c9363c3f200cd5a240686256c7db431

@ -0,0 +1 @@
d4da149a16673c6326f4898ad877756259f676f8

@ -1 +0,0 @@
96a630a7c4916358f129f6bac8718108811efe1a

@ -0,0 +1 @@
ab4141b43cc6c2680d5f5a0b5086299f38ebec4d

@ -1,26 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'
integTestCluster {
// Whitelist reindexing from the local node so we can test it.
setting 'reindex.remote.whitelist', '127.0.0.1:*'
}

@ -1,37 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.smoketest;
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
public class SmokeTestReindexWithPainlessClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
public SmokeTestReindexWithPainlessClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
return ESClientYamlSuiteTestCase.createParameters();
}
}

@ -1,24 +0,0 @@
---
"Totally broken scripts report the error properly":
- do:
index:
index: twitter
type: tweet
id: 1
body: { "user": "kimchy" }
- do:
indices.refresh: {}
- do:
catch: request
reindex:
refresh: true
body:
source:
index: twitter
dest:
index: new_twitter
script:
lang: painless
source: syntax errors are fun!
- match: {error.reason: 'compile error'}

@ -1,33 +0,0 @@
---
"Timeout":
- skip:
version: "all"
reason: painless doesn't support thread.sleep so we need to come up with a better way
- do:
index:
index: twitter
type: tweet
id: 1
body: { "user": "kimchy" }
- do:
indices.refresh: {}
- do:
catch: request_timeout
reindex:
refresh: true
body:
source:
index: twitter
timeout: 10
query:
script:
lang: painless
# Sleep 100x longer than the timeout. That should cause a timeout!
# Return true causes the document to try to be collected which is what actually triggers the timeout.
script: java.lang.Thread.sleep(1000); return true
dest:
index: new_twitter
- is_true: timed_out
- match: {created: 0}
- match: {noops: 0}

@ -1,21 +0,0 @@
---
"Totally broken scripts report the error properly":
- do:
index:
index: twitter
type: tweet
id: 1
body: { "user": "kimchy" }
- do:
indices.refresh: {}
- do:
catch: request
update_by_query:
index: twitter
refresh: true
body:
script:
lang: painless
source: syntax errors are fun!
- match: {error.reason: 'compile error'}

@ -1,30 +0,0 @@
---
"Timeout":
- skip:
version: "all"
reason: painless doesn't support thread.sleep so we need to come up with a better way
- do:
index:
index: twitter
type: tweet
id: 1
body: { "user": "kimchy" }
- do:
indices.refresh: {}
- do:
catch: request_timeout
update_by_query:
index: twitter
refresh: true
search_timeout: 10ms
body:
query:
script:
lang: painless
# Sleep 100x longer than the timeout. That should cause a timeout!
# Return true causes the document to try to be collected which is what actually triggers the timeout.
script: java.lang.Thread.sleep(1000); return true
- is_true: timed_out
- match: {updated: 0}
- match: {noops: 0}

@ -103,9 +103,13 @@ setup:
--- ---
"Split from 1 to N": "Split from 1 to N":
# - skip:
# version: " - 6.99.99"
# reason: Added in 7.0.0
# uncomment once AwaitsFix is resolved
- skip: - skip:
version: " - 6.99.99" version: "all"
reason: Added in 7.0.0 reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503"
- do: - do:
indices.create: indices.create:
index: source_one_shard index: source_one_shard

@ -1,8 +1,12 @@
 ---
 "Split index ignores target template mapping":
+  # - skip:
+  #    version: " - 6.0.99"
+  #    reason: Added in 6.1.0
+  # uncomment once AwaitsFix is resolved
   - skip:
-      version: " - 6.0.99"
-      reason: Added in 6.1.0
+      version: "all"
+      reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503"

   # create index
   - do:

@ -1 +0,0 @@
2b2be48f6622c150496e755497e7bdb8daa46030

@ -0,0 +1 @@
f465718b3db829e7660009aac2c1211fd5d74ca0

@ -1 +0,0 @@
6cbafc48e8ac4966377665eb3bbe93f9addf04a5

@ -0,0 +1 @@
d502441e830e1a9d30270442f8e3fd8317fe7bba

@ -1 +0,0 @@
0b06e4f6514256a3f187a9892e520638b9c59e63

@ -0,0 +1 @@
5167fb0a14434cb10ec3224e9e32ca668e9f9ad4

@ -1 +0,0 @@
4c71cef87fe513a7a96c2a7980ed6f7c2b015763

@ -0,0 +1 @@
488aeecf49413b63a404989ae00b07b20951e76e

@ -1 +0,0 @@
665e044d1180100940bccd7e8e41dde48e342da3

@ -0,0 +1 @@
107755edd67cddb3fb9817de50c0bed3a10da19c

@ -1 +0,0 @@
d343bbf5792f5969288b59b51179acd29d04f4ee

@ -0,0 +1 @@
9226fab3b9c6250af52b87061f637c0f8e3114b6

@ -1 +0,0 @@
8915f3c93af3348655bcc204289f9011835738a2

@ -0,0 +1 @@
2b7bf384c1933225972f04224d867ec800f5e3a7

@ -1 +0,0 @@
e7dc67b42eca3b1546a36370b6dcda0f83b2eb7d

@ -0,0 +1 @@
18b770c35db8757dc036b1506870a4ddaad7b1ab

@ -1 +0,0 @@
5946d5e2be276f66e9ff6d6111acabb03a9330d9

@ -0,0 +1 @@
683f6436938c67709d0c665c9e1fdef7bd893e4a

@ -1 +0,0 @@
d9fc5fc63f3d861e5af72e11373368e8a4c6bba6

@ -0,0 +1 @@
1df20ba64b9aa68f1fa9a15c9ff75f87f94dec47

@ -1 +0,0 @@
ee283c0a1a717f3e0915de75864a93d043efaee3

@ -0,0 +1 @@
895ca714fc62b66ba63d43931730cdc4ef56d35f

@ -1 +0,0 @@
e1adf0220a7c052ac81e2919ffac24ac0e5b007c

@ -0,0 +1 @@
95ab7e9421bbeb8229d83ac72700b37a521fdf4f

@ -1 +0,0 @@
6d9306053942c48f43392a634f11a95462b5996e

@ -0,0 +1 @@
773ff8c8425d32609ccec6956759ad377dfb8f6b

@ -1 +0,0 @@
2334e8c5f4d0f98659b30e0c2035296e4aae8ff5

@ -0,0 +1 @@
ea711541e243ee768f950041e6e2843d0cc5e695

@ -1 +0,0 @@
f2b2c454eb7b5d73b9df1390ea4730ce3dd4e463

@ -0,0 +1 @@
2ca005cf25722ba3777ed93f720f40c937081fa6

@ -19,6 +19,7 @@
 package org.elasticsearch.action.admin.cluster.repositories.get;

+import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
 import org.elasticsearch.cluster.metadata.RepositoryMetaData;
@ -26,12 +27,15 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;

+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+
 /**
  * Get repositories response
  */
@ -74,4 +78,9 @@ public class GetRepositoriesResponse extends ActionResponse implements ToXConten
         builder.endObject();
         return builder;
     }
+
+    public static GetRepositoriesResponse fromXContent(XContentParser parser) throws IOException {
+        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
+        return new GetRepositoriesResponse(RepositoriesMetaData.fromXContent(parser));
+    }
 }
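
Note: the new fromXContent factory above is the parsing hook for the Get Repository high-level REST API. A minimal usage sketch, assuming a JSON body; NamedXContentRegistry.EMPTY and the logging deprecation handler stand in for whatever the caller already has:

    import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.XContentType;

    String json = "{\"my_backup\":{\"type\":\"fs\",\"settings\":{\"location\":\"/mnt/backups\"}}}";
    try (XContentParser parser = XContentType.JSON.xContent()
             .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, json)) {
        // fromXContent consumes the whole object and delegates to RepositoriesMetaData.fromXContent
        GetRepositoriesResponse response = GetRepositoriesResponse.fromXContent(parser);
    }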

@ -54,6 +54,21 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
         return this;
     }

+    /**
+     * Returns the underlying stats flags.
+     */
+    public CommonStatsFlags flags() {
+        return flags;
+    }
+
+    /**
+     * Sets the underlying stats flags.
+     */
+    public IndicesStatsRequest flags(CommonStatsFlags flags) {
+        this.flags = flags;
+        return this;
+    }
+
     /**
      * Document types to return stats for. Mainly affects {@link #indexing(boolean)} when
      * enabled, returning specific indexing stats for those types.
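
Note: together with the TransportIndicesStatsAction change below, this lets callers pass one pre-built CommonStatsFlags through the whole stats path instead of toggling each stat via individual setters. A minimal sketch, with the index name and flag selection as illustrative assumptions:

    // Ask for only doc and store stats on one index, reusing a single flags object.
    CommonStatsFlags flags = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store);
    IndicesStatsRequest request = new IndicesStatsRequest().flags(flags);
    request.indices("my-index");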

@ -99,65 +99,8 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
             throw new ShardNotFoundException(indexShard.shardId());
         }

-        CommonStatsFlags flags = new CommonStatsFlags().clear();
-
-        if (request.docs()) {
-            flags.set(CommonStatsFlags.Flag.Docs);
-        }
-        if (request.store()) {
-            flags.set(CommonStatsFlags.Flag.Store);
-        }
-        if (request.indexing()) {
-            flags.set(CommonStatsFlags.Flag.Indexing);
-            flags.types(request.types());
-        }
-        if (request.get()) {
-            flags.set(CommonStatsFlags.Flag.Get);
-        }
-        if (request.search()) {
-            flags.set(CommonStatsFlags.Flag.Search);
-            flags.groups(request.groups());
-        }
-        if (request.merge()) {
-            flags.set(CommonStatsFlags.Flag.Merge);
-        }
-        if (request.refresh()) {
-            flags.set(CommonStatsFlags.Flag.Refresh);
-        }
-        if (request.flush()) {
-            flags.set(CommonStatsFlags.Flag.Flush);
-        }
-        if (request.warmer()) {
-            flags.set(CommonStatsFlags.Flag.Warmer);
-        }
-        if (request.queryCache()) {
-            flags.set(CommonStatsFlags.Flag.QueryCache);
-        }
-        if (request.fieldData()) {
-            flags.set(CommonStatsFlags.Flag.FieldData);
-            flags.fieldDataFields(request.fieldDataFields());
-        }
-        if (request.segments()) {
-            flags.set(CommonStatsFlags.Flag.Segments);
-            flags.includeSegmentFileSizes(request.includeSegmentFileSizes());
-        }
-        if (request.completion()) {
-            flags.set(CommonStatsFlags.Flag.Completion);
-            flags.completionDataFields(request.completionFields());
-        }
-        if (request.translog()) {
-            flags.set(CommonStatsFlags.Flag.Translog);
-        }
-        if (request.requestCache()) {
-            flags.set(CommonStatsFlags.Flag.RequestCache);
-        }
-        if (request.recovery()) {
-            flags.set(CommonStatsFlags.Flag.Recovery);
-        }
-
-        return new ShardStats(
-            indexShard.routingEntry(),
-            indexShard.shardPath(),
-            new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats(), indexShard.seqNoStats());
+        CommonStats commonStats = new CommonStats(indicesService.getIndicesQueryCache(), indexShard, request.flags());
+        return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), commonStats,
+            indexShard.commitStats(), indexShard.seqNoStats());
     }
 }

@ -75,7 +75,7 @@ public class QueryExplanation implements Streamable {

     @Override
     public void readFrom(StreamInput in) throws IOException {
-        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
             index = in.readOptionalString();
         } else {
             index = in.readString();
@ -92,7 +92,7 @@ public class QueryExplanation implements Streamable {

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
             out.writeOptionalString(index);
         } else {
             out.writeString(index);
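
Note: this is the usual post-backport clean-up. The optional-index wire change was first gated on V_7_0_0_alpha1 while it only existed on master; once it is backported to the 6.4 branch, reader and writer must both gate on V_6_4_0 — and on the same constant — or a mixed 6.3/6.4 cluster would mis-frame the stream. Schematically (the field name is taken from this class; the pairing is the point):

    // The two sides must mirror each other exactly.
    if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
        out.writeOptionalString(index);   // new format: null allowed
    } else {
        out.writeString(index);           // old format: a non-null string is expected
    }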

@ -23,7 +23,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.threadpool.Scheduler;

 import java.util.concurrent.CountDownLatch;
@ -49,7 +48,7 @@ public final class BulkRequestHandler {
         this.consumer = consumer;
         this.listener = listener;
         this.concurrentRequests = concurrentRequests;
-        this.retry = new Retry(EsRejectedExecutionException.class, backoffPolicy, scheduler);
+        this.retry = new Retry(backoffPolicy, scheduler);
         this.semaphore = new Semaphore(concurrentRequests > 0 ? concurrentRequests : 1);
     }

@ -19,13 +19,13 @@
 package org.elasticsearch.action.bulk;

 import org.apache.logging.log4j.Logger;
-import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.FutureUtils;
+import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.threadpool.Scheduler;
 import org.elasticsearch.threadpool.ThreadPool;
@ -40,12 +40,10 @@ import java.util.function.Predicate;
  * Encapsulates synchronous and asynchronous retry logic.
  */
 public class Retry {
-    private final Class<? extends Throwable> retryOnThrowable;
     private final BackoffPolicy backoffPolicy;
     private final Scheduler scheduler;

-    public Retry(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Scheduler scheduler) {
-        this.retryOnThrowable = retryOnThrowable;
+    public Retry(BackoffPolicy backoffPolicy, Scheduler scheduler) {
         this.backoffPolicy = backoffPolicy;
         this.scheduler = scheduler;
     }
@ -60,7 +58,7 @@ public class Retry {
      */
     public void withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest,
                             ActionListener<BulkResponse> listener, Settings settings) {
-        RetryHandler r = new RetryHandler(retryOnThrowable, backoffPolicy, consumer, listener, settings, scheduler);
+        RetryHandler r = new RetryHandler(backoffPolicy, consumer, listener, settings, scheduler);
         r.execute(bulkRequest);
     }
@ -81,12 +79,13 @@
     }

     static class RetryHandler implements ActionListener<BulkResponse> {
+        private static final RestStatus RETRY_STATUS = RestStatus.TOO_MANY_REQUESTS;
+
         private final Logger logger;
         private final Scheduler scheduler;
         private final BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer;
         private final ActionListener<BulkResponse> listener;
         private final Iterator<TimeValue> backoff;
-        private final Class<? extends Throwable> retryOnThrowable;
         // Access only when holding a client-side lock, see also #addResponses()
         private final List<BulkItemResponse> responses = new ArrayList<>();
         private final long startTimestampNanos;
@ -95,10 +94,8 @@ public class Retry {
         private volatile BulkRequest currentBulkRequest;
         private volatile ScheduledFuture<?> scheduledRequestFuture;

-        RetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy,
-                     BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, ActionListener<BulkResponse> listener,
-                     Settings settings, Scheduler scheduler) {
-            this.retryOnThrowable = retryOnThrowable;
+        RetryHandler(BackoffPolicy backoffPolicy, BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer,
+                     ActionListener<BulkResponse> listener, Settings settings, Scheduler scheduler) {
             this.backoff = backoffPolicy.iterator();
             this.consumer = consumer;
             this.listener = listener;
@ -160,9 +157,8 @@ public class Retry {
         }
         for (BulkItemResponse bulkItemResponse : bulkItemResponses) {
             if (bulkItemResponse.isFailed()) {
-                final Throwable cause = bulkItemResponse.getFailure().getCause();
-                final Throwable rootCause = ExceptionsHelper.unwrapCause(cause);
-                if (!rootCause.getClass().equals(retryOnThrowable)) {
+                final RestStatus status = bulkItemResponse.status();
+                if (status != RETRY_STATUS) {
                     return false;
                 }
             }
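
Note: the retry decision now keys off the HTTP status of each failed bulk item rather than the concrete exception class, so any item rejected with 429 TOO_MANY_REQUESTS becomes retryable no matter which layer produced the rejection. A minimal sketch of driving the new two-argument constructor, where scheduler, bulkRequest, listener and settings are assumed to already be in scope and the backoff values are illustrative:

    // Retry rejected bulk requests up to 3 times, starting at 100ms between attempts.
    Retry retry = new Retry(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3), scheduler);
    retry.withBackoff(client::bulk, bulkRequest, listener, settings);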

@ -543,6 +543,14 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                     + docWriteRequest.opType().getLowercase());
         }
         if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
+            // Even though the primary waits on all nodes to ack the mapping changes to the master
+            // (see MappingUpdatedAction.updateMappingOnMaster) we still need to protect against missing mappings
+            // and wait for them. The reason is concurrent requests. Request r1 which has a new field f triggers a
+            // mapping update. Assume that that update is first applied on the primary, and only later on the replica
+            // (it's happening concurrently). Request r2, which now arrives on the primary and which also has the new
+            // field f might see the updated mapping (on the primary), and will therefore proceed to be replicated
+            // to the replica. When it arrives on the replica, there's no guarantee that the replica has already
+            // applied the new mapping, so there is no other option than to wait.
             throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(),
                 "Mappings are not available on the replica yet, triggered update: " + result.getRequiredMappingUpdate());
         }

@ -44,6 +44,8 @@ import java.util.function.Predicate;
  */
 public class DiscoveryNode implements Writeable, ToXContentFragment {

+    static final String COORDINATING_ONLY = "coordinating_only";
+
     public static boolean nodeRequiresLocalStorage(Settings settings) {
         boolean localStorageEnable = Node.NODE_LOCAL_STORAGE_SETTING.get(settings);
         if (localStorageEnable == false &&

@ -148,6 +148,19 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
         return nodes.build();
     }

+    /**
+     * Get a {@link Map} of the coordinating only nodes (nodes which are neither master, nor data, nor ingest nodes) arranged by their ids
+     *
+     * @return {@link Map} of the coordinating only nodes arranged by their ids
+     */
+    public ImmutableOpenMap<String, DiscoveryNode> getCoordinatingOnlyNodes() {
+        ImmutableOpenMap.Builder<String, DiscoveryNode> nodes = ImmutableOpenMap.builder(this.nodes);
+        nodes.removeAll(masterNodes.keys());
+        nodes.removeAll(dataNodes.keys());
+        nodes.removeAll(ingestNodes.keys());
+        return nodes.build();
+    }
+
     /**
      * Get a node by its id
      *
@ -297,7 +310,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
      * - "_local" or "_master" for the relevant nodes
      * - a node id
      * - a wild card pattern that will be matched against node names
-     * - a "attr:value" pattern, where attr can be a node role (master, data, ingest etc.) in which case the value can be true of false
+     * - a "attr:value" pattern, where attr can be a node role (master, data, ingest etc.) in which case the value can be true or false,
      *   or a generic node attribute name in which case value will be treated as a wildcard and matched against the node attribute values.
      */
     public String[] resolveNodes(String... nodes) {
@ -349,6 +362,12 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
             } else {
                 resolvedNodesIds.removeAll(ingestNodes.keys());
             }
+        } else if (DiscoveryNode.COORDINATING_ONLY.equals(matchAttrName)) {
+            if (Booleans.parseBoolean(matchAttrValue, true)) {
+                resolvedNodesIds.addAll(getCoordinatingOnlyNodes().keys());
+            } else {
+                resolvedNodesIds.removeAll(getCoordinatingOnlyNodes().keys());
+            }
         } else {
             for (DiscoveryNode node : this) {
                 for (Map.Entry<String, String> entry : node.getAttributes().entrySet()) {
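
Note: the new selector slots into the existing attr:value branch of resolveNodes, so coordinating-only nodes can be addressed wherever node filters are accepted (for example GET /_nodes/coordinating_only:true over REST). A minimal sketch against the methods above, assuming a DiscoveryNodes instance taken from cluster state:

    DiscoveryNodes nodes = clusterService.state().nodes();
    // Nodes that are neither master-eligible, nor data, nor ingest nodes:
    String[] coordinatingOnly = nodes.resolveNodes("coordinating_only:true");
    // The complement: start from all nodes and subtract the coordinating-only ones.
    String[] everythingElse = nodes.resolveNodes("_all", "coordinating_only:false");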

Some files were not shown because too many files have changed in this diff.