Merge branch 'master' into doc/hlclient-delete

Commit 6c526adaed by David Pilato, 2017-03-01 11:26:36 +01:00
170 changed files with 3174 additions and 3107 deletions

Vagrantfile (vendored, 24 changed lines)

@@ -193,7 +193,15 @@ def provision(config,
raise ArgumentError.new('update_command is required') if update_command == 'required'
raise ArgumentError.new('update_tracking_file is required') if update_tracking_file == 'required'
raise ArgumentError.new('install_command is required') if install_command == 'required'
config.vm.provision "bats dependencies", type: "shell", inline: <<-SHELL
config.vm.provider "virtualbox" do |v|
# Give the box more memory and cpu because our tests are beasts!
v.memory = Integer(ENV['VAGRANT_MEMORY'] || 8192)
v.cpus = Integer(ENV['VAGRANT_CPUS'] || 4)
end
config.vm.synced_folder "#{Dir.home}/.gradle/caches", "/home/vagrant/.gradle/caches",
create: true,
owner: "vagrant"
config.vm.provision "dependencies", type: "shell", inline: <<-SHELL
set -e
set -o pipefail
@@ -256,6 +264,19 @@ def provision(config,
/tmp/bats/install.sh /usr
rm -rf /tmp/bats
}
installed gradle || {
echo "==> Installing gradle"
curl -o /tmp/gradle.zip -L https://services.gradle.org/distributions/gradle-3.3-bin.zip
unzip /tmp/gradle.zip -d /opt
rm -rf /tmp/gradle.zip
ln -s /opt/gradle-3.3/bin/gradle /usr/bin/gradle
# make nfs mounted gradle home dir writeable
# TODO: also chgrp to vagrant once sles and opensuse have vagrant group...
chown vagrant /home/vagrant/.gradle
}
cat \<\<VARS > /etc/profile.d/elasticsearch_vars.sh
export ZIP=/elasticsearch/distribution/zip/build/distributions
export TAR=/elasticsearch/distribution/tar/build/distributions
@@ -265,6 +286,7 @@ export BATS=/project/build/bats
export BATS_UTILS=/project/build/bats/utils
export BATS_TESTS=/project/build/bats/tests
export BATS_ARCHIVES=/project/build/bats/archives
export GRADLE_HOME=/opt/gradle-3.3
VARS
cat \<\<SUDOERS_VARS > /etc/sudoers.d/elasticsearch_vars
Defaults env_keep += "ZIP"

VagrantTestPlugin.groovy

@@ -1,14 +1,14 @@
package org.elasticsearch.gradle.vagrant
import org.elasticsearch.gradle.FileContentsTask
import org.gradle.BuildAdapter
import org.gradle.BuildResult
import org.gradle.api.*
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.execution.TaskExecutionAdapter
import org.gradle.api.internal.artifacts.dependencies.DefaultProjectDependency
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Delete
import org.gradle.api.tasks.Exec
import org.gradle.api.tasks.TaskState
class VagrantTestPlugin implements Plugin<Project> {
@@ -41,6 +41,7 @@ class VagrantTestPlugin implements Plugin<Project> {
private static final BATS = 'bats'
private static final String BATS_TEST_COMMAND ="cd \$BATS_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS"
private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && \$GRADLE_HOME/bin/gradle test integTest"
@Override
void apply(Project project) {
@@ -123,33 +124,27 @@ class VagrantTestPlugin implements Plugin<Project> {
private static void createBatsConfiguration(Project project) {
project.configurations.create(BATS)
Long seed
String formattedSeed = null
String[] upgradeFromVersions
String maybeTestsSeed = System.getProperty("tests.seed", null);
final long seed
final String formattedSeed
String maybeTestsSeed = System.getProperty("tests.seed")
if (maybeTestsSeed != null) {
List<String> seeds = maybeTestsSeed.tokenize(':')
if (seeds.size() != 0) {
String masterSeed = seeds.get(0)
seed = new BigInteger(masterSeed, 16).longValue()
formattedSeed = maybeTestsSeed
if (maybeTestsSeed.trim().isEmpty()) {
throw new GradleException("explicit tests.seed cannot be empty")
}
}
if (formattedSeed == null) {
String masterSeed = maybeTestsSeed.tokenize(':').get(0)
seed = new BigInteger(masterSeed, 16).longValue()
formattedSeed = maybeTestsSeed
} else {
seed = new Random().nextLong()
formattedSeed = String.format("%016X", seed)
}
String maybeUpdradeFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null)
if (maybeUpdradeFromVersions != null) {
upgradeFromVersions = maybeUpdradeFromVersions.split(",")
} else {
upgradeFromVersions = getVersionsFile(project)
String upgradeFromVersion = System.getProperty("tests.packaging.upgradeVersion");
if (upgradeFromVersion == null) {
List<String> availableVersions = getVersionsFile(project).readLines('UTF-8')
upgradeFromVersion = availableVersions[new Random(seed).nextInt(availableVersions.size())]
}
String upgradeFromVersion = upgradeFromVersions[new Random(seed).nextInt(upgradeFromVersions.length)]
DISTRIBUTION_ARCHIVES.each {
// Adds a dependency for the current version
project.dependencies.add(BATS, project.dependencies.project(path: ":distribution:${it}", configuration: 'archives'))
@@ -163,7 +158,6 @@ class VagrantTestPlugin implements Plugin<Project> {
project.extensions.esvagrant.testSeed = seed
project.extensions.esvagrant.formattedTestSeed = formattedSeed
project.extensions.esvagrant.upgradeFromVersion = upgradeFromVersion
project.extensions.esvagrant.upgradeFromVersions = upgradeFromVersions
}
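For context, a minimal Java sketch of the seed handling above, using a hypothetical -Dtests.seed value and versions list; only the first ':'-separated component feeds the picker, so the same seed always selects the same upgrade-from version:

import java.math.BigInteger;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

class SeedPickSketch {
    public static void main(String[] args) {
        String maybeTestsSeed = "DEADBEEF:CAFEBABE";      // hypothetical -Dtests.seed value
        String masterSeed = maybeTestsSeed.split(":")[0]; // only the master seed drives the pick
        long seed = new BigInteger(masterSeed, 16).longValue();
        List<String> available = Arrays.asList("5.2.0", "5.2.1", "5.2.2"); // hypothetical versions file
        // deterministic: rerunning with the same seed prints the same version
        System.out.println(available.get(new Random(seed).nextInt(available.size())));
    }
}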
private static void createCleanTask(Project project) {
@@ -254,22 +248,9 @@ class VagrantTestPlugin implements Plugin<Project> {
contents project.extensions.esvagrant.upgradeFromVersion
}
Task vagrantSetUpTask = project.tasks.create('vagrantSetUp')
Task vagrantSetUpTask = project.tasks.create('setupBats')
vagrantSetUpTask.dependsOn 'vagrantCheckVersion'
vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils, copyBatsArchives, createVersionFile, createUpgradeFromFile
vagrantSetUpTask.doFirst {
project.gradle.addBuildListener new BuildAdapter() {
@Override
void buildFinished(BuildResult result) {
if (result.failure) {
println "Reproduce with: gradle packagingTest "
+"-Pvagrant.boxes=${project.extensions.esvagrant.boxes} "
+ "-Dtests.seed=${project.extensions.esvagrant.formattedSeed} "
+ "-Dtests.packaging.upgrade.from.versions=${project.extensions.esvagrant.upgradeFromVersions.join(",")}"
}
}
}
}
}
private static void createUpdateVersionsTask(Project project) {
@@ -278,7 +259,7 @@ class VagrantTestPlugin implements Plugin<Project> {
group 'Verification'
doLast {
File versions = getVersionsFile(project)
versions.text = listVersions(project).join('\n') + '\n'
versions.setText(listVersions(project).join('\n') + '\n', 'UTF-8')
}
}
}
@@ -288,14 +269,11 @@ class VagrantTestPlugin implements Plugin<Project> {
description 'Update file containing options for the\n "starting" version in the "upgrade from" packaging tests.'
group 'Verification'
doLast {
String maybeUpdateFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null)
if (maybeUpdateFromVersions == null) {
Set<String> versions = listVersions(project)
Set<String> actualVersions = new TreeSet<>(project.extensions.esvagrant.upgradeFromVersions)
if (!versions.equals(actualVersions)) {
throw new GradleException("out-of-date versions " + actualVersions +
", expected " + versions + "; run gradle vagrantUpdateVersions")
}
Set<String> versions = listVersions(project)
Set<String> actualVersions = new TreeSet<>(getVersionsFile(project).readLines('UTF-8'))
if (!versions.equals(actualVersions)) {
throw new GradleException("out-of-date versions " + actualVersions +
", expected " + versions + "; run gradle vagrantUpdateVersions")
}
}
}
@@ -350,6 +328,17 @@ class VagrantTestPlugin implements Plugin<Project> {
}
}
private static void createPlatformTestTask(Project project) {
project.tasks.create('platformTest') {
group 'Verification'
description "Test unit and integ tests on different platforms using vagrant.\n" +
" Specify the vagrant boxes to test using the gradle property 'vagrant.boxes'.\n" +
" 'all' can be used to test all available boxes. The available boxes are: \n" +
" ${BOXES}"
dependsOn 'vagrantCheckVersion'
}
}
private static void createVagrantTasks(Project project) {
createCleanTask(project)
createStopTask(project)
@@ -360,6 +349,7 @@ class VagrantTestPlugin implements Plugin<Project> {
createCheckVirtualBoxVersionTask(project)
createPrepareVagrantTestEnvTask(project)
createPackagingTestTask(project)
createPlatformTestTask(project)
}
private static void createVagrantBoxesTasks(Project project) {
@@ -377,12 +367,15 @@ class VagrantTestPlugin implements Plugin<Project> {
assert project.tasks.virtualboxCheckVersion != null
Task virtualboxCheckVersion = project.tasks.virtualboxCheckVersion
assert project.tasks.vagrantSetUp != null
Task vagrantSetUp = project.tasks.vagrantSetUp
assert project.tasks.setupBats != null
Task setupBats = project.tasks.setupBats
assert project.tasks.packagingTest != null
Task packagingTest = project.tasks.packagingTest
assert project.tasks.platformTest != null
Task platformTest = project.tasks.platformTest
/*
* We always use the main project.rootDir as Vagrant's current working directory (VAGRANT_CWD)
* so that boxes are not duplicated for every Gradle project that uses this VagrantTestPlugin.
@@ -409,8 +402,9 @@ class VagrantTestPlugin implements Plugin<Project> {
boxName box
environmentVars vagrantEnvVars
args 'box', 'update', box
dependsOn vagrantCheckVersion, virtualboxCheckVersion, vagrantSetUp
dependsOn vagrantCheckVersion, virtualboxCheckVersion
}
update.mustRunAfter(setupBats)
Task up = project.tasks.create("vagrant${boxTask}#up", VagrantCommandTask) {
boxName box
@@ -431,11 +425,6 @@ class VagrantTestPlugin implements Plugin<Project> {
dependsOn update
}
if (project.extensions.esvagrant.boxes.contains(box) == false) {
// we don't need test tasks if this box was not specified
continue;
}
Task smoke = project.tasks.create("vagrant${boxTask}#smoketest", Exec) {
environment vagrantEnvVars
dependsOn up
@@ -445,14 +434,58 @@ class VagrantTestPlugin implements Plugin<Project> {
}
vagrantSmokeTest.dependsOn(smoke)
Task packaging = project.tasks.create("vagrant${boxTask}#packagingtest", BatsOverVagrantTask) {
Task packaging = project.tasks.create("vagrant${boxTask}#packagingTest", BatsOverVagrantTask) {
boxName box
environmentVars vagrantEnvVars
dependsOn up, setupBats
finalizedBy halt
command BATS_TEST_COMMAND
}
TaskExecutionAdapter packagingReproListener = new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
if (state.failure != null) {
println "REPRODUCE WITH: gradle ${packaging.path} " +
"-Dtests.seed=${project.extensions.esvagrant.formattedTestSeed} "
}
}
}
packaging.doFirst {
project.gradle.addListener(packagingReproListener)
}
packaging.doLast {
project.gradle.removeListener(packagingReproListener)
}
if (project.extensions.esvagrant.boxes.contains(box)) {
packagingTest.dependsOn(packaging)
}
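For context, a hedged Java sketch of the repro-line listener pattern used above, assuming Gradle's TaskExecutionAdapter API; someTask is a hypothetical Task. Attaching the listener in doFirst and removing it in doLast keeps other tasks' failures from printing this task's repro line:

import org.gradle.api.Task;
import org.gradle.api.execution.TaskExecutionAdapter;
import org.gradle.api.tasks.TaskState;

// inside a plugin's apply(Project project):
TaskExecutionAdapter reproListener = new TaskExecutionAdapter() {
    @Override
    public void afterExecute(Task task, TaskState state) {
        if (state.getFailure() != null) {
            System.out.println("REPRODUCE WITH: gradle " + task.getPath());
        }
    }
};
someTask.doFirst(t -> t.getProject().getGradle().addListener(reproListener));
someTask.doLast(t -> t.getProject().getGradle().removeListener(reproListener));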
Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) {
boxName box
environmentVars vagrantEnvVars
dependsOn up
finalizedBy halt
command BATS_TEST_COMMAND
args 'ssh', boxName, '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.extensions.esvagrant.formattedTestSeed}"
}
TaskExecutionAdapter platformReproListener = new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
if (state.failure != null) {
println "REPRODUCE WITH: gradle ${platform.path} " +
"-Dtests.seed=${project.extensions.esvagrant.formattedTestSeed} "
}
}
}
platform.doFirst {
project.gradle.addListener(platformReproListener)
}
platform.doLast {
project.gradle.removeListener(platformReproListener)
}
if (project.extensions.esvagrant.boxes.contains(box)) {
platformTest.dependsOn(platform)
}
packagingTest.dependsOn(packaging)
}
}
}

version.properties

@@ -1,6 +1,6 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 6.0.0-alpha1
lucene = 6.5.0-snapshot-f919485
lucene = 6.5.0-snapshot-d00c5ca
# optional dependencies
spatial4j = 0.6

Request.java

@@ -29,8 +29,8 @@ import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
@@ -97,6 +97,10 @@ final class Request {
return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
}
static Request info() {
return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
}
static Request bulk(BulkRequest bulkRequest) throws IOException {
Params parameters = Params.builder();
parameters.withTimeout(bulkRequest.timeout());
@@ -264,7 +268,7 @@ final class Request {
}
static Request ping() {
return new Request("HEAD", "/", Collections.emptyMap(), null);
return new Request(HttpHead.METHOD_NAME, "/", Collections.emptyMap(), null);
}
static Request update(UpdateRequest updateRequest) throws IOException {

RestHighLevelClient.java

@@ -35,6 +35,7 @@ import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.CheckedFunction;
@@ -113,6 +114,14 @@ public class RestHighLevelClient {
emptySet(), headers);
}
/**
* Get the cluster info otherwise provided when sending an HTTP request to port 9200
*/
public MainResponse info(Header... headers) throws IOException {
return performRequestAndParseEntity(new MainRequest(), (request) -> Request.info(), MainResponse::fromXContent, emptySet(),
headers);
}
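A minimal usage sketch for the new info() method, assuming an Elasticsearch node reachable on localhost:9200:

import org.apache.http.HttpHost;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class InfoSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient lowLevelClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            RestHighLevelClient client = new RestHighLevelClient(lowLevelClient);
            MainResponse info = client.info(); // GET / parsed into a MainResponse
            System.out.println(info.getClusterName().value() + " " + info.getVersion());
        }
    }
}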
/**
* Retrieves a document by id using the Get API
*

ESRestHighLevelClientTestCase.java

@@ -41,7 +41,7 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
}
@AfterClass
public static void cleanupClient() throws IOException {
public static void cleanupClient() {
restHighLevelClient = null;
}

PingAndInfoIT.java

@@ -19,7 +19,10 @@
package org.elasticsearch.client;
import org.elasticsearch.action.main.MainResponse;
import java.io.IOException;
import java.util.Map;
public class PingAndInfoIT extends ESRestHighLevelClientTestCase {
@@ -27,5 +30,22 @@ public class PingAndInfoIT extends ESRestHighLevelClientTestCase {
assertTrue(highLevelClient().ping());
}
//TODO add here integ tests for info api: "GET /" once we have parsing code for MainResponse
@SuppressWarnings("unchecked")
public void testInfo() throws IOException {
MainResponse info = highLevelClient().info();
// compare with what the low level client outputs
Map<String, Object> infoAsMap = entityAsMap(adminClient().performRequest("GET", "/"));
assertEquals(infoAsMap.get("cluster_name"), info.getClusterName().value());
assertEquals(infoAsMap.get("cluster_uuid"), info.getClusterUuid());
// only check node name existence, might be a different one from what was hit by low level client in multi-node cluster
assertNotNull(info.getNodeName());
Map<String, Object> versionMap = (Map<String, Object>) infoAsMap.get("version");
assertEquals(versionMap.get("build_hash"), info.getBuild().shortHash());
assertEquals(versionMap.get("build_date"), info.getBuild().date());
assertEquals(versionMap.get("build_snapshot"), info.getBuild().isSnapshot());
assertEquals(versionMap.get("number"), info.getVersion().toString());
assertEquals(versionMap.get("lucene_version"), info.getVersion().luceneVersion.toString());
}
}

RequestTests.java

@@ -66,6 +66,14 @@ public class RequestTests extends ESTestCase {
assertEquals("HEAD", request.method);
}
public void testInfo() {
Request request = Request.info();
assertEquals("/", request.endpoint);
assertEquals(0, request.params.size());
assertNull(request.entity);
assertEquals("GET", request.method);
}
public void testGet() {
getAndExistsTest(Request::get, "GET");
}

RestHighLevelClientTests.java

@@ -20,6 +20,7 @@
package org.elasticsearch.client;
import com.fasterxml.jackson.core.JsonParseException;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
@@ -33,15 +34,20 @@ import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.cbor.CborXContent;
import org.elasticsearch.common.xcontent.smile.SmileXContent;
import org.elasticsearch.rest.RestStatus;
@@ -59,6 +65,7 @@ import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.mockito.Matchers.anyMapOf;
import static org.mockito.Matchers.anyObject;
@@ -79,7 +86,7 @@ public class RestHighLevelClientTests extends ESTestCase {
private RestHighLevelClient restHighLevelClient;
@Before
public void initClient() throws IOException {
public void initClient() {
restClient = mock(RestClient.class);
restHighLevelClient = new RestHighLevelClient(restClient);
}
@@ -115,6 +122,21 @@
Matchers.isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}
public void testInfo() throws IOException {
Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header");
Response response = mock(Response.class);
MainResponse testInfo = new MainResponse("nodeName", Version.CURRENT, new ClusterName("clusterName"), "clusterUuid",
Build.CURRENT, true);
when(response.getEntity()).thenReturn(
new StringEntity(toXContent(testInfo, XContentType.JSON, false).utf8ToString(), ContentType.APPLICATION_JSON));
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenReturn(response);
MainResponse receivedInfo = restHighLevelClient.info(headers);
assertEquals(testInfo, receivedInfo);
verify(restClient).performRequest(eq("GET"), eq("/"), eq(Collections.emptyMap()),
Matchers.isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}
public void testRequestValidation() {
ActionRequestValidationException validationException = new ActionRequestValidationException();
validationException.addValidationError("validation error");
@@ -388,7 +410,7 @@ public class RestHighLevelClientTests extends ESTestCase {
assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
}
public void testWrapResponseListenerOnSuccess() throws IOException {
public void testWrapResponseListenerOnSuccess() {
{
TrackingActionListener trackingActionListener = new TrackingActionListener();
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
@@ -414,7 +436,7 @@ public class RestHighLevelClientTests extends ESTestCase {
}
}
public void testWrapResponseListenerOnException() throws IOException {
public void testWrapResponseListenerOnException() {
TrackingActionListener trackingActionListener = new TrackingActionListener();
ResponseListener responseListener = restHighLevelClient.wrapResponseListener(
response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
@@ -543,7 +565,7 @@ public class RestHighLevelClientTests extends ESTestCase {
assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
}
public void testNamedXContents() throws IOException {
public void testNamedXContents() {
List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getNamedXContents();
assertEquals(0, namedXContents.size());
}

RestClient.java

@@ -289,40 +289,44 @@ public class RestClient implements Closeable {
public void performRequestAsync(String method, String endpoint, Map<String, String> params,
HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
ResponseListener responseListener, Header... headers) {
Objects.requireNonNull(params, "params must not be null");
Map<String, String> requestParams = new HashMap<>(params);
//ignore is a special parameter supported by the clients, shouldn't be sent to es
String ignoreString = requestParams.remove("ignore");
Set<Integer> ignoreErrorCodes;
if (ignoreString == null) {
if (HttpHead.METHOD_NAME.equals(method)) {
//404 never causes error if returned for a HEAD request
ignoreErrorCodes = Collections.singleton(404);
try {
Objects.requireNonNull(params, "params must not be null");
Map<String, String> requestParams = new HashMap<>(params);
//ignore is a special parameter supported by the clients, shouldn't be sent to es
String ignoreString = requestParams.remove("ignore");
Set<Integer> ignoreErrorCodes;
if (ignoreString == null) {
if (HttpHead.METHOD_NAME.equals(method)) {
//404 never causes error if returned for a HEAD request
ignoreErrorCodes = Collections.singleton(404);
} else {
ignoreErrorCodes = Collections.emptySet();
}
} else {
ignoreErrorCodes = Collections.emptySet();
}
} else {
String[] ignoresArray = ignoreString.split(",");
ignoreErrorCodes = new HashSet<>();
if (HttpHead.METHOD_NAME.equals(method)) {
//404 never causes error if returned for a HEAD request
ignoreErrorCodes.add(404);
}
for (String ignoreCode : ignoresArray) {
try {
ignoreErrorCodes.add(Integer.valueOf(ignoreCode));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead", e);
String[] ignoresArray = ignoreString.split(",");
ignoreErrorCodes = new HashSet<>();
if (HttpHead.METHOD_NAME.equals(method)) {
//404 never causes error if returned for a HEAD request
ignoreErrorCodes.add(404);
}
for (String ignoreCode : ignoresArray) {
try {
ignoreErrorCodes.add(Integer.valueOf(ignoreCode));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead", e);
}
}
}
URI uri = buildUri(pathPrefix, endpoint, requestParams);
HttpRequestBase request = createHttpRequest(method, uri, entity);
setHeaders(request, headers);
FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
long startTime = System.nanoTime();
performRequestAsync(startTime, nextHost(), request, ignoreErrorCodes, httpAsyncResponseConsumerFactory,
failureTrackingResponseListener);
} catch (Exception e) {
responseListener.onFailure(e);
}
URI uri = buildUri(pathPrefix, endpoint, requestParams);
HttpRequestBase request = createHttpRequest(method, uri, entity);
setHeaders(request, headers);
FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
long startTime = System.nanoTime();
performRequestAsync(startTime, nextHost(), request, ignoreErrorCodes, httpAsyncResponseConsumerFactory,
failureTrackingResponseListener);
}
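To illustrate the effect of the try/catch wrapping above, a hedged caller sketch (client is a hypothetical RestClient instance): argument-validation failures such as the null params check now reach the listener instead of being thrown from performRequestAsync:

client.performRequestAsync("GET", "/", null, null,
    new ResponseListener() {
        @Override
        public void onSuccess(Response response) {
            // normal completion path
        }
        @Override
        public void onFailure(Exception e) {
            // with this change, the NullPointerException("params must not be null")
            // is delivered here rather than thrown on the calling thread
        }
    });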
private void performRequestAsync(final long startTime, final HostTuple<Iterator<HttpHost>> hostTuple, final HttpRequestBase request,

RestClientTests.java

@@ -0,0 +1,84 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
public class RestClientTests extends RestClientTestCase {
public void testPerformAsyncWithUnsupportedMethod() throws Exception {
RestClient.SyncResponseListener listener = new RestClient.SyncResponseListener(10000);
try (RestClient restClient = createRestClient()) {
restClient.performRequestAsync("unsupported", randomAsciiOfLength(5), listener);
listener.get();
fail("should have failed because of unsupported method");
} catch (UnsupportedOperationException exception) {
assertEquals("http method not supported: unsupported", exception.getMessage());
}
}
public void testPerformAsyncWithNullParams() throws Exception {
RestClient.SyncResponseListener listener = new RestClient.SyncResponseListener(10000);
try (RestClient restClient = createRestClient()) {
restClient.performRequestAsync(randomAsciiOfLength(5), randomAsciiOfLength(5), null, listener);
listener.get();
fail("should have failed because of null parameters");
} catch (NullPointerException exception) {
assertEquals("params must not be null", exception.getMessage());
}
}
public void testPerformAsyncWithNullHeaders() throws Exception {
RestClient.SyncResponseListener listener = new RestClient.SyncResponseListener(10000);
try (RestClient restClient = createRestClient()) {
restClient.performRequestAsync("GET", randomAsciiOfLength(5), listener, (Header) null);
listener.get();
fail("should have failed because of null headers");
} catch (NullPointerException exception) {
assertEquals("request header must not be null", exception.getMessage());
}
}
public void testPerformAsyncWithWrongEndpoint() throws Exception {
RestClient.SyncResponseListener listener = new RestClient.SyncResponseListener(10000);
try (RestClient restClient = createRestClient()) {
restClient.performRequestAsync("GET", "::http:///", listener);
listener.get();
fail("should have failed because of wrong endpoint");
} catch (IllegalArgumentException exception) {
assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage());
}
}
private static RestClient createRestClient() {
HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)};
return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000), new Header[]{}, hosts, null, null);
}
}

(30 single-line Lucene .sha1 checksum files: new checksums added for the 6.5.0-snapshot-d00c5ca jars, old 6.5.0-snapshot-f919485 checksums removed; the jar filenames were not preserved in this view)

MapperQueryParser.java

@@ -19,9 +19,6 @@
package org.apache.lucene.queryparser.classic;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@@ -33,12 +30,14 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.GraphQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.automaton.RegExp;
@@ -59,6 +58,9 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
/**
* A query parser that uses the {@link MapperService} in order to build smarter
* queries based on the mapping information.
@@ -747,26 +749,29 @@ public class MapperQueryParser extends AnalyzingQueryParser {
MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder((MultiPhraseQuery) q);
builder.setSlop(slop);
return builder.build();
} else if (q instanceof GraphQuery && ((GraphQuery) q).hasPhrase()) {
// we have a graph query that has at least one phrase sub-query
// re-build and set slop on all phrase queries
List<Query> oldQueries = ((GraphQuery) q).getQueries();
Query[] queries = new Query[oldQueries.size()];
for (int i = 0; i < queries.length; i++) {
Query oldQuery = oldQueries.get(i);
if (oldQuery instanceof PhraseQuery) {
queries[i] = addSlopToPhrase((PhraseQuery) oldQuery, slop);
} else {
queries[i] = oldQuery;
}
}
return new GraphQuery(queries);
} else if (q instanceof SpanQuery) {
return addSlopToSpan((SpanQuery) q, slop);
} else {
return q;
}
}
private Query addSlopToSpan(SpanQuery query, int slop) {
if (query instanceof SpanNearQuery) {
return new SpanNearQuery(((SpanNearQuery) query).getClauses(), slop,
((SpanNearQuery) query).isInOrder());
} else if (query instanceof SpanOrQuery) {
SpanQuery[] clauses = new SpanQuery[((SpanOrQuery) query).getClauses().length];
int pos = 0;
for (SpanQuery clause : ((SpanOrQuery) query).getClauses()) {
clauses[pos++] = (SpanQuery) addSlopToSpan(clause, slop);
}
return new SpanOrQuery(clauses);
} else {
return query;
}
}
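A minimal Lucene sketch of what addSlopToSpan does in the span-near case, with a hypothetical field and terms:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;

public class SpanSlopSketch {
    public static void main(String[] args) {
        SpanQuery[] clauses = new SpanQuery[] {
            new SpanTermQuery(new Term("body", "quick")),
            new SpanTermQuery(new Term("body", "fox"))
        };
        SpanNearQuery original = new SpanNearQuery(clauses, 0, true); // slop 0, in order
        // the rebuild performed above: same clauses and ordering, new slop
        SpanNearQuery withSlop = new SpanNearQuery(original.getClauses(), 2, original.isInOrder());
        System.out.println(withSlop);
    }
}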
/**
* Rebuild a phrase query with a slop value
*/

Version.java

@@ -112,6 +112,8 @@ public class Version implements Comparable<Version> {
public static final Version V_5_2_1_UNRELEASED = new Version(V_5_2_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
public static final int V_5_2_2_ID_UNRELEASED = 5020299;
public static final Version V_5_2_2_UNRELEASED = new Version(V_5_2_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
public static final int V_5_2_3_ID_UNRELEASED = 5020399;
public static final Version V_5_2_3_UNRELEASED = new Version(V_5_2_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
public static final int V_5_3_0_ID_UNRELEASED = 5030099;
public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
public static final int V_5_4_0_ID_UNRELEASED = 5040099;
@@ -138,6 +140,8 @@ public class Version implements Comparable<Version> {
return V_6_0_0_alpha1_UNRELEASED;
case V_5_3_0_ID_UNRELEASED:
return V_5_3_0_UNRELEASED;
case V_5_2_3_ID_UNRELEASED:
return V_5_2_3_UNRELEASED;
case V_5_2_2_ID_UNRELEASED:
return V_5_2_2_UNRELEASED;
case V_5_2_1_ID_UNRELEASED:
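A short sketch of the id scheme these constants appear to follow: two decimal digits per major/minor/revision component plus a build component, with 99 marking an unreleased build, so 5.2.3 becomes 5020399 (my reading of the constants above):

public class VersionIdSketch {
    public static void main(String[] args) {
        int major = 5, minor = 2, revision = 3, build = 99; // 99 = unreleased
        int id = major * 1_000_000 + minor * 10_000 + revision * 100 + build;
        System.out.println(id); // prints 5020399, matching V_5_2_3_ID_UNRELEASED
    }
}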

TaskOperationFailure.java

@@ -88,7 +88,7 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
return status;
}
public Throwable getCause() {
public Exception getCause() {
return reason;
}

BulkItemResultHolder.java

@@ -0,0 +1,42 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.bulk;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.engine.Engine;
/**
* A struct-like holder for a bulk item's response, result, and the resulting
* replica operation to be executed.
*/
class BulkItemResultHolder {
public final @Nullable DocWriteResponse response;
public final @Nullable Engine.Result operationResult;
public final BulkItemRequest replicaRequest;
BulkItemResultHolder(@Nullable DocWriteResponse response,
@Nullable Engine.Result operationResult,
BulkItemRequest replicaRequest) {
this.response = response;
this.operationResult = operationResult;
this.replicaRequest = replicaRequest;
}
}

MappingUpdatePerformer.java

@@ -0,0 +1,67 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.bulk;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import java.util.Objects;
public interface MappingUpdatePerformer {
/**
* Determine if any mappings need to be updated, and update them on the
* master node if necessary. Returns a failed {@code Engine.IndexResult}
* in the event updating the mappings fails or null if successful.
* Throws a {@code ReplicationOperation.RetryOnPrimaryException} if the
* operation needs to be retried on the primary due to the mappings not
* being present yet, or a different exception if updating the mappings
* on the master failed.
*/
@Nullable
MappingUpdateResult updateMappingsIfNeeded(IndexShard primary, IndexRequest request) throws Exception;
/**
* Class encapsulating the resulting of potentially updating the mapping
*/
class MappingUpdateResult {
@Nullable
public final Engine.Index operation;
@Nullable
public final Exception failure;
MappingUpdateResult(Exception failure) {
Objects.requireNonNull(failure, "failure cannot be null");
this.failure = failure;
this.operation = null;
}
MappingUpdateResult(Engine.Index operation) {
Objects.requireNonNull(operation, "operation cannot be null");
this.operation = operation;
this.failure = null;
}
public boolean isFailed() {
return failure != null;
}
}
}
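Because MappingUpdatePerformer has a single abstract method, tests can swap in a stub with a lambda. A hedged sketch (hypothetical; assumes same-package test code, since MappingUpdateResult's constructors are package-private):

// a stub that always reports a mapping-update failure, e.g. to drive the
// failure branch of executeIndexRequestOnPrimary in a unit test:
MappingUpdatePerformer failingUpdater = (primary, request) ->
    new MappingUpdatePerformer.MappingUpdateResult(new RuntimeException("simulated mapping failure"));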

TransportShardBulkAction.java

@@ -19,6 +19,7 @@
package org.elasticsearch.action.bulk;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
@@ -42,9 +43,11 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
@@ -65,12 +68,16 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import java.util.function.LongSupplier;
/** Performs shard-level bulk (index, delete or update) operations */
public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
public static final String ACTION_NAME = BulkAction.NAME + "[s]";
private static final Logger logger = ESLoggerFactory.getLogger(TransportShardBulkAction.class);
private final UpdateHelper updateHelper;
private final MappingUpdatedAction mappingUpdatedAction;
@@ -105,8 +112,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
BulkShardRequest request, IndexShard primary) throws Exception {
final IndexMetaData metaData = primary.indexSettings().getIndexMetaData();
Translog.Location location = null;
final MappingUpdatePerformer mappingUpdater = new ConcreteMappingUpdatePerformer();
for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) {
location = executeBulkItemRequest(metaData, primary, request, location, requestIndex);
location = executeBulkItemRequest(metaData, primary, request, location, requestIndex,
updateHelper, threadPool::absoluteTimeInMillis, mappingUpdater);
}
BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
BulkItemRequest[] items = request.items();
@@ -117,57 +126,56 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
return new WritePrimaryResult<>(request, response, location, null, primary, logger);
}
/** Executes bulk item requests and handles request execution exceptions */
private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard primary,
BulkShardRequest request,
Translog.Location location, int requestIndex) throws Exception {
final DocWriteRequest itemRequest = request.items()[requestIndex].request();
final DocWriteRequest.OpType opType = itemRequest.opType();
final Engine.Result operationResult;
final DocWriteResponse response;
final BulkItemRequest replicaRequest;
switch (itemRequest.opType()) {
case CREATE:
case INDEX:
final IndexRequest indexRequest = (IndexRequest) itemRequest;
Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
response = indexResult.hasFailure() ? null :
new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(),
indexResult.getVersion(), indexResult.isCreated());
operationResult = indexResult;
replicaRequest = request.items()[requestIndex];
break;
case UPDATE:
UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest),
primary, metaData, request, requestIndex);
operationResult = updateResultHolder.operationResult;
response = updateResultHolder.response;
replicaRequest = updateResultHolder.replicaRequest;
break;
case DELETE:
final DeleteRequest deleteRequest = (DeleteRequest) itemRequest;
Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary);
response = deleteResult.hasFailure() ? null :
new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(),
deleteResult.getVersion(), deleteResult.isFound());
operationResult = deleteResult;
replicaRequest = request.items()[requestIndex];
break;
default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
}
// update the bulk item request because update request execution can mutate the bulk item request
request.items()[requestIndex] = replicaRequest;
private static BulkItemResultHolder executeIndexRequest(final IndexRequest indexRequest,
final BulkItemRequest bulkItemRequest,
final IndexShard primary,
final MappingUpdatePerformer mappingUpdater) throws Exception {
Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater);
if (indexResult.hasFailure()) {
return new BulkItemResultHolder(null, indexResult, bulkItemRequest);
} else {
IndexResponse response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(),
indexResult.getSeqNo(), indexResult.getVersion(), indexResult.isCreated());
return new BulkItemResultHolder(response, indexResult, bulkItemRequest);
}
}
private static BulkItemResultHolder executeDeleteRequest(final DeleteRequest deleteRequest,
final BulkItemRequest bulkItemRequest,
final IndexShard primary) throws IOException {
Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary);
if (deleteResult.hasFailure()) {
return new BulkItemResultHolder(null, deleteResult, bulkItemRequest);
} else {
DeleteResponse response = new DeleteResponse(primary.shardId(), deleteRequest.type(), deleteRequest.id(),
deleteResult.getSeqNo(), deleteResult.getVersion(), deleteResult.isFound());
return new BulkItemResultHolder(response, deleteResult, bulkItemRequest);
}
}
// Visible for unit testing
static Translog.Location updateReplicaRequest(BulkItemResultHolder bulkItemResult,
final DocWriteRequest.OpType opType,
final Translog.Location originalLocation,
BulkShardRequest request) {
final Engine.Result operationResult = bulkItemResult.operationResult;
final DocWriteResponse response = bulkItemResult.response;
final BulkItemRequest replicaRequest = bulkItemResult.replicaRequest;
if (operationResult == null) { // in case of noop update operation
assert response.getResult() == DocWriteResponse.Result.NOOP
: "only noop update can have null operation";
assert response.getResult() == DocWriteResponse.Result.NOOP : "only noop updates can have a null operation";
replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response));
return originalLocation;
} else if (operationResult.hasFailure() == false) {
location = locationToSync(location, operationResult.getTranslogLocation());
BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response);
replicaRequest.setPrimaryResponse(primaryResponse);
// set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though.
// set a blank ShardInfo so we can safely send it to the replicas. We won't use it in the real response though.
primaryResponse.getResponse().setShardInfo(new ShardInfo());
// The operation was successful, advance the translog
return locationToSync(originalLocation, operationResult.getTranslogLocation());
} else {
DocWriteRequest docWriteRequest = replicaRequest.request();
Exception failure = operationResult.getFailure();
@@ -178,15 +186,57 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
}
// if its a conflict failure, and we already executed the request on a primary (and we execute it
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
// then just use the response we got from the failed execution
if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) {
replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(),
new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)));
replicaRequest.setPrimaryResponse(
new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(),
// Make sure to use request.index() here, if you
// use docWriteRequest.index() it will use the
// concrete index instead of an alias if used!
new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)));
}
return originalLocation;
}
assert replicaRequest.getPrimaryResponse() != null;
}
/** Executes bulk item requests and handles request execution exceptions */
static Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard primary,
BulkShardRequest request, Translog.Location location,
int requestIndex, UpdateHelper updateHelper,
LongSupplier nowInMillisSupplier,
final MappingUpdatePerformer mappingUpdater) throws Exception {
final DocWriteRequest itemRequest = request.items()[requestIndex].request();
final DocWriteRequest.OpType opType = itemRequest.opType();
final BulkItemResultHolder responseHolder;
switch (itemRequest.opType()) {
case CREATE:
case INDEX:
responseHolder = executeIndexRequest((IndexRequest) itemRequest,
request.items()[requestIndex], primary, mappingUpdater);
break;
case UPDATE:
responseHolder = executeUpdateRequest((UpdateRequest) itemRequest, primary, metaData, request,
requestIndex, updateHelper, nowInMillisSupplier, mappingUpdater);
break;
case DELETE:
responseHolder = executeDeleteRequest((DeleteRequest) itemRequest, request.items()[requestIndex], primary);
break;
default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
}
final BulkItemRequest replicaRequest = responseHolder.replicaRequest;
// update the bulk item request because update request execution can mutate the bulk item request
request.items()[requestIndex] = replicaRequest;
// Modify the replica request, if needed, and return a new translog location
location = updateReplicaRequest(responseHolder, opType, location, request);
assert replicaRequest.getPrimaryResponse() != null : "replica request must have a primary response";
return location;
}
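A minimal sketch of the LongSupplier clock injection introduced above: production code passes threadPool::absoluteTimeInMillis, while a test can pin the clock (hypothetical values):

import java.util.function.LongSupplier;

public class ClockInjectionSketch {
    static long stamp(LongSupplier nowInMillis) {
        return nowInMillis.getAsLong(); // stands in for the updateHelper.prepare(...) call
    }
    public static void main(String[] args) {
        System.out.println(stamp(System::currentTimeMillis)); // production-style supplier
        System.out.println(stamp(() -> 42L));                 // deterministic supplier for tests
    }
}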
@@ -194,29 +244,18 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
return ExceptionsHelper.unwrapCause(e) instanceof VersionConflictEngineException;
}
private static class UpdateResultHolder {
final BulkItemRequest replicaRequest;
final Engine.Result operationResult;
final DocWriteResponse response;
private UpdateResultHolder(BulkItemRequest replicaRequest, Engine.Result operationResult,
DocWriteResponse response) {
this.replicaRequest = replicaRequest;
this.operationResult = operationResult;
this.response = response;
}
}
/**
* Executes update request, delegating to a index or delete operation after translation,
* handles retries on version conflict and constructs update response
* NOTE: reassigns bulk item request at <code>requestIndex</code> for replicas to
* execute translated update request (NOOP update is an exception). NOOP updates are
* indicated by returning a <code>null</code> operation in {@link UpdateResultHolder}
* indicated by returning a <code>null</code> operation in {@link BulkItemResultHolder}
* */
private UpdateResultHolder executeUpdateRequest(UpdateRequest updateRequest, IndexShard primary,
IndexMetaData metaData, BulkShardRequest request,
int requestIndex) throws Exception {
private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateRequest, IndexShard primary,
IndexMetaData metaData, BulkShardRequest request,
int requestIndex, UpdateHelper updateHelper,
LongSupplier nowInMillis,
final MappingUpdatePerformer mappingUpdater) throws Exception {
Engine.Result updateOperationResult = null;
UpdateResponse updateResponse = null;
BulkItemRequest replicaRequest = request.items()[requestIndex];
@@ -225,7 +264,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
final UpdateHelper.Result translate;
// translate update request
try {
translate = updateHelper.prepare(updateRequest, primary, threadPool::absoluteTimeInMillis);
translate = updateHelper.prepare(updateRequest, primary, nowInMillis);
} catch (Exception failure) {
// we may fail translating a update to index or delete operation
// we use index result to communicate failure while translating update request
@@ -239,7 +278,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
IndexRequest indexRequest = translate.action();
MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
indexRequest.process(mappingMd, request.index());
updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater);
break;
case DELETED:
DeleteRequest deleteRequest = translate.action();
@@ -300,7 +339,14 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
break; // out of retry loop
}
}
return new UpdateResultHolder(replicaRequest, updateOperationResult, updateResponse);
return new BulkItemResultHolder(updateResponse, updateOperationResult, replicaRequest);
}
static boolean shouldExecuteReplicaItem(final BulkItemRequest request, final int index) {
final BulkItemResponse primaryResponse = request.getPrimaryResponse();
assert primaryResponse != null : "expected primary response to be set for item [" + index + "] request ["+ request.request() +"]";
return primaryResponse.isFailed() == false &&
primaryResponse.getResponse().getResult() != DocWriteResponse.Result.NOOP;
}
@Override
@@ -308,9 +354,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
Translog.Location location = null;
for (int i = 0; i < request.items().length; i++) {
BulkItemRequest item = request.items()[i];
assert item.getPrimaryResponse() != null : "expected primary response to be set for item [" + i + "] request ["+ item.request() +"]";
if (item.getPrimaryResponse().isFailed() == false &&
item.getPrimaryResponse().getResponse().getResult() != DocWriteResponse.Result.NOOP) {
if (shouldExecuteReplicaItem(item, i)) {
DocWriteRequest docWriteRequest = item.request();
DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse();
final Engine.Result operationResult;
@@ -352,7 +396,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
return new WriteReplicaResult<>(request, location, null, replica, logger);
}
private Translog.Location locationToSync(Translog.Location current, Translog.Location next) {
private static Translog.Location locationToSync(Translog.Location current, Translog.Location next) {
/* here we are moving forward in the translog with each operation. Under the hood
* this might cross translog files which is ok since from the user perspective
* the translog is like a tape where only the highest location needs to be fsynced
@@ -391,7 +435,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
}
/** Utility method to prepare an index operation on primary shards */
private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
SourceToParse sourceToParse =
SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source(),
request.getContentType()).routing(request.routing()).parent(request.parent());
@@ -400,36 +444,12 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
/** Executes index operation on primary shard after updates mapping if dynamic mappings are found */
public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary,
MappingUpdatedAction mappingUpdatedAction) throws Exception {
Engine.Index operation;
try {
operation = prepareIndexOperationOnPrimary(request, primary);
} catch (MapperParsingException | IllegalArgumentException e) {
return new Engine.IndexResult(e, request.version());
MappingUpdatePerformer mappingUpdater) throws Exception {
MappingUpdatePerformer.MappingUpdateResult result = mappingUpdater.updateMappingsIfNeeded(primary, request);
if (result.isFailed()) {
return new Engine.IndexResult(result.failure, request.version());
}
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = primary.shardId();
if (update != null) {
// can throw timeout exception when updating mappings or ISE for attempting to update default mappings
// which are bubbled up
try {
mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
} catch (IllegalArgumentException e) {
// throws IAE on conflicts merging dynamic mappings
return new Engine.IndexResult(e, request.version());
}
try {
operation = prepareIndexOperationOnPrimary(request, primary);
} catch (MapperParsingException | IllegalArgumentException e) {
return new Engine.IndexResult(e, request.version());
}
update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {
throw new ReplicationOperation.RetryOnPrimaryException(shardId,
"Dynamic mappings are not available on the node that holds the primary yet");
}
}
return primary.index(operation);
return primary.index(result.operation);
}
private static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) throws IOException {
@@ -445,4 +465,39 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
primaryResponse.getSeqNo(), request.primaryTerm(), version, versionType);
return replica.delete(delete);
}
class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer {
@Nullable
public MappingUpdateResult updateMappingsIfNeeded(IndexShard primary, IndexRequest request) throws Exception {
Engine.Index operation;
try {
operation = prepareIndexOperationOnPrimary(request, primary);
} catch (MapperParsingException | IllegalArgumentException e) {
return new MappingUpdateResult(e);
}
final Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = primary.shardId();
if (update != null) {
// can throw timeout exception when updating mappings or ISE for attempting to update default mappings
// which are bubbled up
try {
mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
} catch (IllegalArgumentException e) {
// throws IAE on conflicts merging dynamic mappings
return new MappingUpdateResult(e);
}
try {
operation = prepareIndexOperationOnPrimary(request, primary);
} catch (MapperParsingException | IllegalArgumentException e) {
return new MappingUpdateResult(e);
}
if (operation.parsedDoc().dynamicMappingsUpdate() != null) {
throw new ReplicationOperation.RetryOnPrimaryException(shardId,
"Dynamic mappings are not available on the node that holds the primary yet");
}
}
return new MappingUpdateResult(operation);
}
}
}

MainResponse.java

@@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
public class MainResponse extends ActionResponse implements ToXContentObject {
@ -137,4 +138,26 @@ public class MainResponse extends ActionResponse implements ToXContentObject {
public static MainResponse fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
MainResponse other = (MainResponse) o;
return Objects.equals(nodeName, other.nodeName) &&
Objects.equals(version, other.version) &&
Objects.equals(clusterUuid, other.clusterUuid) &&
Objects.equals(build, other.build) &&
Objects.equals(available, other.available) &&
Objects.equals(clusterName, other.clusterName);
}
@Override
public int hashCode() {
return Objects.hash(nodeName, version, clusterUuid, build, clusterName, available);
}
}

View File

@ -62,7 +62,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -233,48 +232,20 @@ public class SearchPhaseController extends AbstractComponent {
if (result.queryResult().topDocs() instanceof CollapseTopFieldDocs) {
CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) result.queryResult().topDocs();
final Sort sort = new Sort(firstTopDocs.fields);
final CollapseTopFieldDocs[] shardTopDocs = new CollapseTopFieldDocs[numShards];
if (result.size() != shardTopDocs.length) {
// TopDocs#merge can't deal with null shard TopDocs
final CollapseTopFieldDocs empty = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
sort.getSort(), new Object[0], Float.NaN);
Arrays.fill(shardTopDocs, empty);
}
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
TopDocs topDocs = sortedResult.value.queryResult().topDocs();
// the 'index' field is the position in the resultsArr atomic array
shardTopDocs[sortedResult.index] = (CollapseTopFieldDocs) topDocs;
}
fillTopDocs(shardTopDocs, results, new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
sort.getSort(), new Object[0], Float.NaN));
mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs);
} else if (result.queryResult().topDocs() instanceof TopFieldDocs) {
TopFieldDocs firstTopDocs = (TopFieldDocs) result.queryResult().topDocs();
final Sort sort = new Sort(firstTopDocs.fields);
final TopFieldDocs[] shardTopDocs = new TopFieldDocs[resultsArr.length()];
if (result.size() != shardTopDocs.length) {
// TopDocs#merge can't deal with null shard TopDocs
final TopFieldDocs empty = new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN);
Arrays.fill(shardTopDocs, empty);
}
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
TopDocs topDocs = sortedResult.value.queryResult().topDocs();
// the 'index' field is the position in the resultsArr atomic array
shardTopDocs[sortedResult.index] = (TopFieldDocs) topDocs;
}
mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs);
fillTopDocs(shardTopDocs, results, new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN));
mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs, true);
} else {
final TopDocs[] shardTopDocs = new TopDocs[resultsArr.length()];
if (result.size() != shardTopDocs.length) {
// TopDocs#merge can't deal with null shard TopDocs
Arrays.fill(shardTopDocs, Lucene.EMPTY_TOP_DOCS);
}
for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
TopDocs topDocs = sortedResult.value.queryResult().topDocs();
// the 'index' field is the position in the resultsArr atomic array
shardTopDocs[sortedResult.index] = topDocs;
}
mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs);
fillTopDocs(shardTopDocs, results, Lucene.EMPTY_TOP_DOCS);
mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs, true);
}
ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs;
@ -314,6 +285,20 @@ public class SearchPhaseController extends AbstractComponent {
return scoreDocs;
}
static <T extends TopDocs> void fillTopDocs(T[] shardTopDocs,
        List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> results,
        T emptyTopDocs) {
    if (results.size() != shardTopDocs.length) {
        // TopDocs#merge can't deal with null shard TopDocs
        Arrays.fill(shardTopDocs, emptyTopDocs);
}
for (AtomicArray.Entry<? extends QuerySearchResultProvider> resultProvider : results) {
final T topDocs = (T) resultProvider.value.queryResult().topDocs();
assert topDocs != null : "top docs must not be null in a valid result";
// the 'index' field is the position in the resultsArr atomic array
shardTopDocs[resultProvider.index] = topDocs;
}
}
public ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase,
ScoreDoc[] sortedScoreDocs, int numShards) {
ScoreDoc[] lastEmittedDocPerShard = new ScoreDoc[numShards];

View File

@ -100,10 +100,11 @@ public abstract class Terminal {
public final boolean promptYesNo(String prompt, boolean defaultYes) {
String answerPrompt = defaultYes ? " [Y/n]" : " [y/N]";
while (true) {
String answer = readText(prompt + answerPrompt).toLowerCase(Locale.ROOT);
if (answer.isEmpty()) {
String answer = readText(prompt + answerPrompt);
if (answer == null || answer.isEmpty()) {
return defaultYes;
}
answer = answer.toLowerCase(Locale.ROOT);
boolean answerYes = answer.equals("y");
if (answerYes == false && answer.equals("n") == false) {
println("Did not understand answer '" + answer + "'");

View File

@ -172,7 +172,7 @@ public class ClusterModule extends AbstractModule {
addAllocationDecider(deciders, new NodeVersionAllocationDecider(settings));
addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider(settings));
addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new SameShardAllocationDecider(settings));
addAllocationDecider(deciders, new SameShardAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings));
addAllocationDecider(deciders, new ThrottlingAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new ShardsLimitAllocationDecider(settings, clusterSettings));

View File

@ -23,7 +23,9 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
/**
@ -46,14 +48,23 @@ public class SameShardAllocationDecider extends AllocationDecider {
public static final String NAME = "same_shard";
public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING =
Setting.boolSetting("cluster.routing.allocation.same_shard.host", false, Setting.Property.NodeScope);
Setting.boolSetting("cluster.routing.allocation.same_shard.host", false, Property.Dynamic, Property.NodeScope);
private final boolean sameHost;
private volatile boolean sameHost;
public SameShardAllocationDecider(Settings settings) {
public SameShardAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
this.sameHost = CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.get(settings);
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING, this::setSameHost);
}
/**
* Sets the same host setting. {@code true} if allocating two copies of the same shard to the
* same host should not be allowed, even when multiple nodes are being run on the same host. {@code false}
* otherwise.
*/
private void setSameHost(boolean sameHost) {
this.sameHost = sameHost;
}
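With Property.Dynamic added, the same-host setting can now be changed at runtime; a minimal sketch, assuming a connected client named client:

    client.admin().cluster().prepareUpdateSettings()
            .setTransientSettings(Settings.builder()
                    .put(SameShardAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), true))
            .get();

The registered settings-update consumer then calls setSameHost(true) on each node, with no restart required.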
@Override

View File

@ -21,13 +21,33 @@ package org.elasticsearch.common.logging;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.common.SuppressLoggerChecks;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.format.SignStyle;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static java.time.temporal.ChronoField.DAY_OF_MONTH;
import static java.time.temporal.ChronoField.DAY_OF_WEEK;
import static java.time.temporal.ChronoField.HOUR_OF_DAY;
import static java.time.temporal.ChronoField.MINUTE_OF_HOUR;
import static java.time.temporal.ChronoField.MONTH_OF_YEAR;
import static java.time.temporal.ChronoField.SECOND_OF_MINUTE;
import static java.time.temporal.ChronoField.YEAR;
/**
* A logger that logs deprecation notices.
@ -36,14 +56,6 @@ public class DeprecationLogger {
private final Logger logger;
/**
* The "Warning" Header comes from RFC-7234. As the RFC describes, it's generally used for caching purposes, but it can be
* used for <em>any</em> warning.
*
* https://tools.ietf.org/html/rfc7234#section-5.5
*/
public static final String WARNING_HEADER = "Warning";
/**
* This is set once by the {@code Node} constructor, but it uses {@link CopyOnWriteArraySet} to ensure that tests can run in parallel.
* <p>
@ -112,6 +124,114 @@ public class DeprecationLogger {
deprecated(THREAD_CONTEXT, msg, params);
}
/*
* RFC7234 specifies the warning format as warn-code <space> warn-agent <space> "warn-text" [<space> "warn-date"]. Here, warn-code is a
* three-digit number with various standard warn codes specified. The warn code 299 is apt for our purposes as it represents a
* miscellaneous persistent warning (can be presented to a human, or logged, and must not be removed by a cache). The warn-agent is an
* arbitrary token; here we use the Elasticsearch version and build hash. The warn text must be quoted. The warn-date is an optional
* quoted field that can be in a variety of specified date formats; here we use RFC 1123 format.
*/
private static final String WARNING_FORMAT =
String.format(
Locale.ROOT,
"299 Elasticsearch-%s%s-%s ",
Version.CURRENT.toString(),
Build.CURRENT.isSnapshot() ? "-SNAPSHOT" : "",
Build.CURRENT.shortHash()) +
"\"%s\" \"%s\"";
/*
* RFC 7234 section 5.5 specifies that the warn-date is a quoted HTTP-date. HTTP-date is defined in RFC 7234 Appendix B as being from
* RFC 7231 section 7.1.1.1. RFC 7231 specifies an HTTP-date as an IMF-fixdate (or an obs-date referring to obsolete formats). The
* grammar for IMF-fixdate is specified as 'day-name "," SP date1 SP time-of-day SP GMT'. Here, day-name is
* (Mon|Tue|Wed|Thu|Fri|Sat|Sun). Then, date1 is 'day SP month SP year' where day is 2DIGIT, month is
* (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec), and year is 4DIGIT. Lastly, time-of-day is 'hour ":" minute ":" second' where
* hour is 2DIGIT, minute is 2DIGIT, and second is 2DIGIT. Finally, 2DIGIT and 4DIGIT have the obvious definitions.
*/
private static final DateTimeFormatter RFC_7231_DATE_TIME;
static {
final Map<Long, String> dow = new HashMap<>();
dow.put(1L, "Mon");
dow.put(2L, "Tue");
dow.put(3L, "Wed");
dow.put(4L, "Thu");
dow.put(5L, "Fri");
dow.put(6L, "Sat");
dow.put(7L, "Sun");
final Map<Long, String> moy = new HashMap<>();
moy.put(1L, "Jan");
moy.put(2L, "Feb");
moy.put(3L, "Mar");
moy.put(4L, "Apr");
moy.put(5L, "May");
moy.put(6L, "Jun");
moy.put(7L, "Jul");
moy.put(8L, "Aug");
moy.put(9L, "Sep");
moy.put(10L, "Oct");
moy.put(11L, "Nov");
moy.put(12L, "Dec");
RFC_7231_DATE_TIME = new DateTimeFormatterBuilder()
.parseCaseInsensitive()
.parseLenient()
.optionalStart()
.appendText(DAY_OF_WEEK, dow)
.appendLiteral(", ")
.optionalEnd()
.appendValue(DAY_OF_MONTH, 2, 2, SignStyle.NOT_NEGATIVE)
.appendLiteral(' ')
.appendText(MONTH_OF_YEAR, moy)
.appendLiteral(' ')
.appendValue(YEAR, 4)
.appendLiteral(' ')
.appendValue(HOUR_OF_DAY, 2)
.appendLiteral(':')
.appendValue(MINUTE_OF_HOUR, 2)
.optionalStart()
.appendLiteral(':')
.appendValue(SECOND_OF_MINUTE, 2)
.optionalEnd()
.appendLiteral(' ')
.appendOffset("+HHMM", "GMT")
.toFormatter(Locale.getDefault(Locale.Category.FORMAT));
}
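A quick sanity check on the builder above: formatting the (made-up) instant 2017-02-25T10:27:43Z at zone GMT yields

    Sat, 25 Feb 2017 10:27:43 GMT

i.e. day-name, two-digit day, month, four-digit year, time-of-day, and the literal GMT that appendOffset("+HHMM", "GMT") emits for a zero offset.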
private static final ZoneId GMT = ZoneId.of("GMT");
/**
* Regular expression to test if a string matches the RFC7234 specification for warning headers. This pattern assumes that the warn code
* is always 299. Further, this pattern assumes that the warn agent represents a version of Elasticsearch including the build hash.
*/
public static Pattern WARNING_HEADER_PATTERN = Pattern.compile(
"299 " + // warn code
"Elasticsearch-\\d+\\.\\d+\\.\\d+(?:-(?:alpha|beta|rc)\\d+)?(?:-SNAPSHOT)?-(?:[a-f0-9]{7}|Unknown) " + // warn agent
"\"((?:\t| |!|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x80-\\xff]|\\\\|\\\\\")*)\" " + // quoted warning value, captured
// quoted RFC 1123 date format
"\"" + // opening quote
"(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // weekday
"\\d{2} " + // 2-digit day
"(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) " + // month
"\\d{4} " + // 4-digit year
"\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second)
"GMT" + // GMT
"\""); // closing quote
/**
* Extracts the warning value from the value of a warning header that is formatted according to RFC 7234. That is, given a string
* {@code 299 Elasticsearch-6.0.0 "warning value" "Sat, 25 Feb 2017 10:27:43 GMT"}, the return value of this method would be {@code
* warning value}.
*
* @param s the value of a warning header formatted according to RFC 7234.
* @return the extracted warning value
*/
public static String extractWarningValueFromWarningHeader(final String s) {
final Matcher matcher = WARNING_HEADER_PATTERN.matcher(s);
final boolean matches = matcher.matches();
assert matches;
return matcher.group(1);
}
/**
* Logs a deprecated message to the deprecation log, as well as to the local {@link ThreadContext}.
*
@ -120,16 +240,19 @@ public class DeprecationLogger {
* @param params The parameters used to fill in the message, if any exist.
*/
@SuppressLoggerChecks(reason = "safely delegates to logger")
void deprecated(Set<ThreadContext> threadContexts, String message, Object... params) {
Iterator<ThreadContext> iterator = threadContexts.iterator();
void deprecated(final Set<ThreadContext> threadContexts, final String message, final Object... params) {
final Iterator<ThreadContext> iterator = threadContexts.iterator();
if (iterator.hasNext()) {
final String formattedMessage = LoggerMessageFormat.format(message, params);
final String warningHeaderValue = formatWarning(formattedMessage);
assert WARNING_HEADER_PATTERN.matcher(warningHeaderValue).matches();
assert extractWarningValueFromWarningHeader(warningHeaderValue).equals(escape(formattedMessage));
while (iterator.hasNext()) {
try {
iterator.next().addResponseHeader(WARNING_HEADER, formattedMessage);
} catch (IllegalStateException e) {
final ThreadContext next = iterator.next();
next.addResponseHeader("Warning", warningHeaderValue, DeprecationLogger::extractWarningValueFromWarningHeader);
} catch (final IllegalStateException e) {
// ignored; it should be removed shortly
}
}
@ -139,4 +262,25 @@ public class DeprecationLogger {
}
}
/**
* Format a warning string in the proper warning format by prepending a warn code, warn agent, wrapping the warning string in quotes,
* and appending the RFC 7231 date.
*
* @param s the warning string to format
* @return a warning value formatted according to RFC 7234
*/
public static String formatWarning(final String s) {
return String.format(Locale.ROOT, WARNING_FORMAT, escape(s), RFC_7231_DATE_TIME.format(ZonedDateTime.now(GMT)));
}
/**
* Escape backslashes and quotes in the specified string.
*
* @param s the string to escape
* @return the escaped string
*/
public static String escape(String s) {
return s.replaceAll("(\\\\|\")", "\\\\$1");
}
}
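Taken together, a header produced by formatWarning (version, build hash, message, and date all made up here) looks like 299 Elasticsearch-6.0.0-SNAPSHOT-abc1234 "this setting is deprecated" "Sat, 25 Feb 2017 10:27:43 GMT". A minimal round-trip sketch of the helpers above, mirroring the asserts in deprecated():

    String message = "[deprecated_setting] is deprecated"; // made-up message
    String header = DeprecationLogger.formatWarning(message);
    assert DeprecationLogger.WARNING_HEADER_PATTERN.matcher(header).matches();
    assert DeprecationLogger.extractWarningValueFromWarningHeader(header)
            .equals(DeprecationLogger.escape(message));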

View File

@ -42,7 +42,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
@ -377,12 +376,12 @@ public class Setting<T> extends ToXContentToBytes {
if (exists(primary)) {
return get(primary);
}
if (fallbackSetting == null) {
return get(secondary);
}
if (exists(secondary)) {
return get(secondary);
}
if (fallbackSetting == null) {
return get(primary);
}
if (fallbackSetting.exists(primary)) {
return fallbackSetting.get(primary);
}
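To make the new resolution order concrete, a small sketch with made-up setting names (boolSetting with a fallback Setting exists for exactly this pattern):

    Setting<Boolean> oldSetting = Setting.boolSetting("old.name", false, Property.NodeScope);
    Setting<Boolean> newSetting = Setting.boolSetting("new.name", oldSetting, Property.NodeScope);
    Settings primary = Settings.builder().put("old.name", true).build();
    // "new.name" is set in neither primary nor secondary, so the fallback is consulted:
    // it exists in primary, hence newSetting.get(primary, Settings.EMPTY) returns true
    boolean value = newSetting.get(primary, Settings.EMPTY);

With the reordering, an explicit value in secondary now wins over the fallback, and the primary's own default is returned when no fallback is configured.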

View File

@ -34,7 +34,9 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@ -257,12 +259,25 @@ public final class ThreadContext implements Closeable, Writeable {
}
/**
* Add the <em>unique</em> response {@code value} for the specified {@code key}.
* <p>
* Any duplicate {@code value} is ignored.
* Add the {@code value} for the specified {@code key}. Any duplicate {@code value} is ignored.
*
* @param key the header name
* @param value the header value
*/
public void addResponseHeader(String key, String value) {
threadLocal.set(threadLocal.get().putResponse(key, value));
public void addResponseHeader(final String key, final String value) {
addResponseHeader(key, value, v -> v);
}
/**
* Add the {@code value} for the specified {@code key} with the specified {@code uniqueValue} used for de-duplication. Any duplicate
* {@code value} after applying {@code uniqueValue} is ignored.
*
* @param key the header name
* @param value the header value
* @param uniqueValue the function that produces de-duplication values
*/
public void addResponseHeader(final String key, final String value, final Function<String, String> uniqueValue) {
threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue));
}
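A usage sketch of the de-duplicating overload (header name, values, and key function are all illustrative):

    ThreadContext ctx = new ThreadContext(Settings.EMPTY);
    ctx.addResponseHeader("Warning", "warning A (at 10:01)", v -> v.substring(0, v.indexOf(" (")));
    ctx.addResponseHeader("Warning", "warning A (at 10:02)", v -> v.substring(0, v.indexOf(" (")));
    // both values reduce to the key "warning A", so the second call is ignored
    assert ctx.getResponseHeaders().get("Warning").size() == 1;

This is exactly what DeprecationLogger relies on: two warning headers that differ only in their timestamp de-duplicate to a single response header value.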
/**
@ -396,14 +411,16 @@ public final class ThreadContext implements Closeable, Writeable {
return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders);
}
private ThreadContextStruct putResponse(String key, String value) {
private ThreadContextStruct putResponse(final String key, final String value, final Function<String, String> uniqueValue) {
assert value != null;
final Map<String, List<String>> newResponseHeaders = new HashMap<>(this.responseHeaders);
final List<String> existingValues = newResponseHeaders.get(key);
if (existingValues != null) {
if (existingValues.contains(value)) {
final Set<String> existingUniqueValues = existingValues.stream().map(uniqueValue).collect(Collectors.toSet());
assert existingValues.size() == existingUniqueValues.size();
if (existingUniqueValues.contains(uniqueValue.apply(value))) {
return this;
}

View File

@ -69,8 +69,9 @@ public final class MergeSchedulerConfig {
private volatile int maxMergeCount;
MergeSchedulerConfig(IndexSettings indexSettings) {
setMaxThreadAndMergeCount(indexSettings.getValue(MAX_THREAD_COUNT_SETTING),
indexSettings.getValue(MAX_MERGE_COUNT_SETTING));
int maxThread = indexSettings.getValue(MAX_THREAD_COUNT_SETTING);
int maxMerge = indexSettings.getValue(MAX_MERGE_COUNT_SETTING);
setMaxThreadAndMergeCount(maxThread, maxMerge);
this.autoThrottle = indexSettings.getValue(AUTO_THROTTLE_SETTING);
}

View File

@ -18,6 +18,9 @@
*/
package org.elasticsearch.index.analysis;
import java.io.Reader;
import java.util.regex.Pattern;
import org.apache.lucene.analysis.pattern.PatternReplaceCharFilter;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.regex.Regex;
@ -25,10 +28,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import java.io.Reader;
import java.util.regex.Pattern;
public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory {
public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory implements MultiTermAwareComponent {
private final Pattern pattern;
private final String replacement;
@ -56,4 +56,9 @@ public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory {
public Reader create(Reader tokenStream) {
return new PatternReplaceCharFilter(pattern, replacement, tokenStream);
}
@Override
public Object getMultiTermComponent() {
return this;
}
}

View File

@ -27,13 +27,18 @@ import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.GraphQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.QueryBuilder;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
@ -49,7 +54,6 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.support.QueryParsers;
import java.io.IOException;
import java.util.List;
public class MatchQuery {
@ -318,17 +322,6 @@ public class MatchQuery {
public Query createPhrasePrefixQuery(String field, String queryText, int phraseSlop, int maxExpansions) {
final Query query = createFieldQuery(getAnalyzer(), Occur.MUST, field, queryText, true, phraseSlop);
if (query instanceof GraphQuery) {
// we have a graph query, convert inner queries to multi phrase prefix queries
List<Query> oldQueries = ((GraphQuery) query).getQueries();
Query[] queries = new Query[oldQueries.size()];
for (int i = 0; i < queries.length; i++) {
queries[i] = toMultiPhrasePrefix(oldQueries.get(i), phraseSlop, maxExpansions);
}
return new GraphQuery(queries);
}
return toMultiPhrasePrefix(query, phraseSlop, maxExpansions);
}
@ -340,6 +333,9 @@ public class MatchQuery {
boost *= bq.getBoost();
innerQuery = bq.getQuery();
}
if (query instanceof SpanQuery) {
return toSpanQueryPrefix((SpanQuery) query, boost);
}
final MultiPhrasePrefixQuery prefixQuery = new MultiPhrasePrefixQuery();
prefixQuery.setMaxExpansions(maxExpansions);
prefixQuery.setSlop(phraseSlop);
@ -369,6 +365,33 @@ public class MatchQuery {
return query;
}
private Query toSpanQueryPrefix(SpanQuery query, float boost) {
if (query instanceof SpanTermQuery) {
SpanMultiTermQueryWrapper<PrefixQuery> ret =
new SpanMultiTermQueryWrapper<>(new PrefixQuery(((SpanTermQuery) query).getTerm()));
return boost == 1 ? ret : new BoostQuery(ret, boost);
} else if (query instanceof SpanNearQuery) {
SpanNearQuery spanNearQuery = (SpanNearQuery) query;
SpanQuery[] clauses = spanNearQuery.getClauses();
if (clauses[clauses.length-1] instanceof SpanTermQuery) {
clauses[clauses.length-1] = new SpanMultiTermQueryWrapper<>(
new PrefixQuery(((SpanTermQuery) clauses[clauses.length-1]).getTerm())
);
}
SpanNearQuery newQuery = new SpanNearQuery(clauses, spanNearQuery.getSlop(), spanNearQuery.isInOrder());
return boost == 1 ? newQuery : new BoostQuery(newQuery, boost);
} else if (query instanceof SpanOrQuery) {
SpanOrQuery orQuery = (SpanOrQuery) query;
SpanQuery[] clauses = new SpanQuery[orQuery.getClauses().length];
for (int i = 0; i < clauses.length; i++) {
clauses[i] = (SpanQuery) toSpanQueryPrefix(orQuery.getClauses()[i], 1);
}
return boost == 1 ? new SpanOrQuery(clauses) : new BoostQuery(new SpanOrQuery(clauses), boost);
} else {
return query;
}
}
public Query createCommonTermsQuery(String field, String queryText, Occur highFreqOccur, Occur lowFreqOccur, float
maxTermFrequency, MappedFieldType fieldType) {
Query booleanQuery = createBooleanQuery(field, queryText, lowFreqOccur);

View File

@ -20,9 +20,11 @@ package org.elasticsearch.index.shard;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.action.support.ThreadedActionListener;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
@ -32,6 +34,7 @@ import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;
public class IndexShardOperationsLock implements Closeable {
private final ShardId shardId;
@ -126,11 +129,13 @@ public class IndexShardOperationsLock implements Closeable {
if (delayedOperations == null) {
delayedOperations = new ArrayList<>();
}
final Supplier<StoredContext> contextSupplier = threadPool.getThreadContext().newRestorableContext(false);
if (executorOnDelay != null) {
delayedOperations.add(
new ThreadedActionListener<>(logger, threadPool, executorOnDelay, onAcquired, forceExecution));
new ThreadedActionListener<>(logger, threadPool, executorOnDelay,
new ContextPreservingActionListener<>(contextSupplier, onAcquired), forceExecution));
} else {
delayedOperations.add(onAcquired);
delayedOperations.add(new ContextPreservingActionListener<>(contextSupplier, onAcquired));
}
return;
}

View File

@ -668,7 +668,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
public final long translogLocation;
public final int size;
Location(long generation, long translogLocation, int size) {
public Location(long generation, long translogLocation, int size) {
this.generation = generation;
this.translogLocation = translogLocation;
this.size = size;

View File

@ -748,7 +748,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
} catch (UnsupportedOperationException e) {
// If its a read-only repository, listing blobs by prefix may not be supported (e.g. a URL repository),
// in this case, try reading the latest index generation from the index.latest blob
return readSnapshotIndexLatestBlob();
try {
return readSnapshotIndexLatestBlob();
} catch (NoSuchFileException nsfe) {
return RepositoryData.EMPTY_REPO_GEN;
}
}
}

View File

@ -33,7 +33,6 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -121,6 +120,10 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
public B reduce(List<B> buckets, ReduceContext context) {
long docCount = 0;
// For the per term doc count error we add up the errors from the
// shards that did not respond with the term. To do this we add up
// the errors from the shards that did respond with the term and
// subtract that from the sum of the errors from all shards
long docCountError = 0;
List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
for (B bucket : buckets) {
@ -226,7 +229,15 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
if (terms.getBucketsInternal().size() < getShardSize() || InternalOrder.isTermOrder(order)) {
thisAggDocCountError = 0;
} else if (InternalOrder.isCountDesc(this.order)) {
thisAggDocCountError = terms.getBucketsInternal().get(terms.getBucketsInternal().size() - 1).docCount;
if (terms.getDocCountError() > 0) {
// If there is an existing docCountError for this agg then
// use this as the error for this aggregation
thisAggDocCountError = terms.getDocCountError();
} else {
// otherwise use the doc count of the last term in the
// aggregation
thisAggDocCountError = terms.getBucketsInternal().get(terms.getBucketsInternal().size() - 1).docCount;
}
} else {
thisAggDocCountError = -1;
}
@ -239,7 +250,14 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
}
setDocCountError(thisAggDocCountError);
for (B bucket : terms.getBucketsInternal()) {
bucket.docCountError = thisAggDocCountError;
// If there is already a doc count error for this bucket
// subtract this aggs doc count error from it to make the
// new value for the bucket. This then means that when the
// final error for the bucket is calculated below we account
// for the existing error calculated in a previous reduce.
// Note that if the error is unbounded (-1) this will be fixed
// later in this method.
bucket.docCountError -= thisAggDocCountError;
List<B> bucketList = buckets.get(bucket.getKey());
if (bucketList == null) {
bucketList = new ArrayList<>();
@ -253,12 +271,10 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
final BucketPriorityQueue<B> ordered = new BucketPriorityQueue<>(size, order.comparator(null));
for (List<B> sameTermBuckets : buckets.values()) {
final B b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext);
if (b.docCountError != -1) {
if (sumDocCountError == -1) {
b.docCountError = -1;
} else {
b.docCountError = sumDocCountError - b.docCountError;
}
if (sumDocCountError == -1) {
b.docCountError = -1;
} else {
b.docCountError += sumDocCountError;
}
if (b.docCount >= minDocCount || reduceContext.isFinalReduce() == false) {
B removed = ordered.insertWithOverflow(b);
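As a worked (made-up) example of this bookkeeping: three shards report with per-shard errors 3, 2, and 5, so sumDocCountError = 10. A term returned only by the first two shards enters the map with error 0, has 3 and then 2 subtracted in the loop above (leaving -5), and the final pass adds sumDocCountError back: -5 + 10 = 5, exactly the error contributed by the one shard that did not report the term.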

View File

@ -35,6 +35,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
public class InternalScriptedMetric extends InternalAggregation implements ScriptedMetric {
private final Script reduceScript;
@ -126,4 +127,16 @@ public class InternalScriptedMetric extends InternalAggregation implements Scrip
return builder.field("value", aggregation());
}
@Override
protected boolean doEquals(Object obj) {
InternalScriptedMetric other = (InternalScriptedMetric) obj;
return Objects.equals(reduceScript, other.reduceScript) &&
Objects.equals(aggregation, other.aggregation);
}
@Override
protected int doHashCode() {
return Objects.hash(reduceScript, aggregation);
}
}

View File

@ -190,7 +190,7 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder
if (initScript != null) {
executableInitScript = queryShardContext.getLazyExecutableScript(initScript, ScriptContext.Standard.AGGS);
} else {
executableInitScript = (p) -> null;;
executableInitScript = (p) -> null;
}
Function<Map<String, Object>, SearchScript> searchMapScript = queryShardContext.getLazySearchScript(mapScript,
ScriptContext.Standard.AGGS);

View File

@ -119,7 +119,7 @@ public class InternalTopHits extends InternalAggregation implements TopHits {
shardDocs[i] = topHitsAgg.topDocs;
shardHits[i] = topHitsAgg.searchHits;
}
reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs);
reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs, true);
} else {
shardDocs = new TopDocs[aggregations.size()];
for (int i = 0; i < shardDocs.length; i++) {
@ -127,7 +127,7 @@ public class InternalTopHits extends InternalAggregation implements TopHits {
shardDocs[i] = topHitsAgg.topDocs;
shardHits[i] = topHitsAgg.searchHits;
}
reducedTopDocs = TopDocs.merge(from, size, shardDocs);
reducedTopDocs = TopDocs.merge(from, size, shardDocs, true);
}
final int[] tracker = new int[shardHits.length];

View File

@ -38,11 +38,11 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.collapse.CollapseBuilder;
import org.elasticsearch.search.SearchExtBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.search.collapse.CollapseBuilder;
import org.elasticsearch.search.fetch.StoredFieldsContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
@ -314,6 +314,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
* From index to start the search from. Defaults to <tt>0</tt>.
*/
public SearchSourceBuilder from(int from) {
if (from < 0) {
throw new IllegalArgumentException("[from] parameter cannot be negative");
}
this.from = from;
return this;
}

View File

@ -70,7 +70,7 @@ final class ProfileScorer extends Scorer {
}
@Override
public Collection<ChildScorer> getChildren() {
public Collection<ChildScorer> getChildren() throws IOException {
return scorer.getChildren();
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.search.suggest;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
@ -48,7 +49,6 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
@ -384,13 +384,14 @@ public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? ex
return builder;
}
@SuppressWarnings("unchecked")
public static Suggestion<? extends Entry<? extends Option>> fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
String typeAndName = parser.currentName();
// we need to extract the type prefix from the name and throw error if it is not present
int delimiterPos = typeAndName.indexOf(InternalAggregation.TYPED_KEYS_DELIMITER);
String type = null;
String name = null;
String type;
String name;
if (delimiterPos > 0) {
type = typeAndName.substring(0, delimiterPos);
name = typeAndName.substring(delimiterPos + 1);
@ -399,35 +400,17 @@ public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? ex
"Cannot parse suggestion response without type information. Set [" + RestSearchAction.TYPED_KEYS_PARAM
+ "] parameter on the request to ensure the type information is added to the response output");
}
Suggestion suggestion = null;
Function<XContentParser, Entry> entryParser = null;
// the "size" parameter and the SortBy for TermSuggestion cannot be parsed from the response, use default values
// TODO investigate if we can use NamedXContentRegistry instead of this switch
switch (type) {
case Suggestion.NAME:
suggestion = new Suggestion(name, -1);
entryParser = Suggestion.Entry::fromXContent;
break;
case PhraseSuggestion.NAME:
suggestion = new PhraseSuggestion(name, -1);
entryParser = PhraseSuggestion.Entry::fromXContent;
break;
case TermSuggestion.NAME:
suggestion = new TermSuggestion(name, -1, SortBy.SCORE);
entryParser = TermSuggestion.Entry::fromXContent;
break;
case CompletionSuggestion.NAME:
suggestion = new CompletionSuggestion(name, -1);
entryParser = CompletionSuggestion.Entry::fromXContent;
break;
default:
throw new ParsingException(parser.getTokenLocation(), "Unknown suggestion type [{}]", type);
}
return parser.namedObject(Suggestion.class, type, name);
}
protected static <E extends Suggestion.Entry<?>> void parseEntries(XContentParser parser, Suggestion<E> suggestion,
CheckedFunction<XContentParser, E, IOException> entryParser)
throws IOException {
ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation);
while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) {
suggestion.addTerm(entryParser.apply(parser));
}
return suggestion;
}
/**
@ -584,6 +567,7 @@ public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? ex
}
}
@SuppressWarnings("unchecked")
protected O newOption(){
return (O) new Option();
}
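The removed switch is replaced by a lookup through the parser's NamedXContentRegistry via parser.namedObject(Suggestion.class, type, name). A sketch of how such an entry could be registered, assuming an Entry overload that accepts a context-aware parser (the actual registration site is not part of this diff):

    List<NamedXContentRegistry.Entry> entries = new ArrayList<>();
    entries.add(new NamedXContentRegistry.Entry(Suggestion.class, new ParseField("phrase"),
            (parser, context) -> PhraseSuggestion.fromXContent(parser, (String) context)));
    NamedXContentRegistry registry = new NamedXContentRegistry(entries);

Each suggestion type then supplies its own fromXContent(parser, name) factory, like the ones added below for completion, phrase, and term suggestions.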

View File

@ -184,8 +184,10 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext>
private final SuggestDocPriorityQueue pq;
private final Map<Integer, SuggestDoc> scoreDocMap;
// TODO: expose dup removal
TopDocumentsCollector(int num) {
super(1); // TODO hack, we don't use the underlying pq, so we allocate a size of 1
super(1, false); // TODO hack, we don't use the underlying pq, so we allocate a size of 1
this.num = num;
this.scoreDocMap = new LinkedHashMap<>(num);
this.pq = new SuggestDocPriorityQueue(num);

View File

@ -94,6 +94,12 @@ public final class CompletionSuggestion extends Suggest.Suggestion<CompletionSug
return getOptions().size() > 0;
}
public static CompletionSuggestion fromXContent(XContentParser parser, String name) throws IOException {
CompletionSuggestion suggestion = new CompletionSuggestion(name, -1);
parseEntries(parser, suggestion, CompletionSuggestion.Entry::fromXContent);
return suggestion;
}
private static final class OptionPriorityQueue extends org.apache.lucene.util.PriorityQueue<Entry.Option> {
private final Comparator<Suggest.Suggestion.Entry.Option> comparator;

View File

@ -60,10 +60,13 @@ public class PhraseSuggestion extends Suggest.Suggestion<PhraseSuggestion.Entry>
return new Entry();
}
public static PhraseSuggestion fromXContent(XContentParser parser, String name) throws IOException {
PhraseSuggestion suggestion = new PhraseSuggestion(name, -1);
parseEntries(parser, suggestion, PhraseSuggestion.Entry::fromXContent);
return suggestion;
}
public static class Entry extends Suggestion.Entry<Suggestion.Entry.Option> {
static class Fields {
static final String CUTOFF_SCORE = "cutoff_score";
}
protected double cutoffScore = Double.MIN_VALUE;

View File

@ -132,6 +132,13 @@ public class TermSuggestion extends Suggestion<TermSuggestion.Entry> {
sort.writeTo(out);
}
public static TermSuggestion fromXContent(XContentParser parser, String name) throws IOException {
// the "size" parameter and the SortBy for TermSuggestion cannot be parsed from the response, use default values
TermSuggestion suggestion = new TermSuggestion(name, -1, SortBy.SCORE);
parseEntries(parser, suggestion, TermSuggestion.Entry::fromXContent);
return suggestion;
}
@Override
protected Entry newEntry() {
return new Entry();
@ -140,8 +147,7 @@ public class TermSuggestion extends Suggestion<TermSuggestion.Entry> {
/**
* Represents a part from the suggest text with suggested options.
*/
public static class Entry extends
org.elasticsearch.search.suggest.Suggest.Suggestion.Entry<TermSuggestion.Entry.Option> {
public static class Entry extends org.elasticsearch.search.suggest.Suggest.Suggestion.Entry<TermSuggestion.Entry.Option> {
public Entry(Text text, int offset, int length) {
super(text, offset, length);
@ -235,7 +241,7 @@ public class TermSuggestion extends Suggestion<TermSuggestion.Entry> {
PARSER.declareFloat(constructorArg(), Suggestion.Entry.Option.SCORE);
}
public static final Option fromXContent(XContentParser parser) {
public static Option fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
}

View File

@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" {
//// Very special jar permissions:
//// These are dangerous permissions that we don't want to grant to everything.
grant codeBase "${codebase.lucene-core-6.5.0-snapshot-f919485.jar}" {
grant codeBase "${codebase.lucene-core-6.5.0-snapshot-d00c5ca.jar}" {
// needed to allow MMapDirectory's "unmap hack" (die unmap hack, die)
// java 8 package
permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-6.5.0-snapshot-f919485.jar}" {
permission java.lang.RuntimePermission "accessDeclaredMembers";
};
grant codeBase "${codebase.lucene-misc-6.5.0-snapshot-f919485.jar}" {
grant codeBase "${codebase.lucene-misc-6.5.0-snapshot-d00c5ca.jar}" {
// needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper
permission java.nio.file.LinkPermission "hard";
};

View File

@ -33,7 +33,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" {
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
};
grant codeBase "${codebase.lucene-test-framework-6.5.0-snapshot-f919485.jar}" {
grant codeBase "${codebase.lucene-test-framework-6.5.0-snapshot-d00c5ca.jar}" {
// needed by RamUsageTester
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
// needed for testing hardlinks in StoreRecoveryTests since we install MockFS

View File

@ -0,0 +1,493 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.bulk;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardTestCase;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.Matchers.containsString;
public class TransportShardBulkActionTests extends IndexShardTestCase {
private final ShardId shardId = new ShardId("index", "_na_", 0);
private final Settings idxSettings = Settings.builder()
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 0)
.put("index.version.created", Version.CURRENT.id)
.build();
private IndexMetaData indexMetaData() throws IOException {
return IndexMetaData.builder("index")
.putMapping("type", "{\"properties\": {\"foo\": {\"type\": \"text\"}}}")
.settings(idxSettings)
.primaryTerm(0, 1).build();
}
public void testShouldExecuteReplicaItem() throws Exception {
// Successful index request should be replicated
DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
DocWriteResponse response = new IndexResponse(shardId, "type", "id", 1, 1, randomBoolean());
BulkItemRequest request = new BulkItemRequest(0, writeRequest);
request.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, response));
assertTrue(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0));
// Failed index requests should not be replicated (for now!)
writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
response = new IndexResponse(shardId, "type", "id", 1, 1, randomBoolean());
request = new BulkItemRequest(0, writeRequest);
request.setPrimaryResponse(
new BulkItemResponse(0, DocWriteRequest.OpType.INDEX,
new BulkItemResponse.Failure("test", "type", "id", new IllegalArgumentException("i died"))));
assertFalse(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0));
// NOOP requests should not be replicated
writeRequest = new UpdateRequest("index", "type", "id");
response = new UpdateResponse(shardId, "type", "id", 1, DocWriteResponse.Result.NOOP);
request = new BulkItemRequest(0, writeRequest);
request.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.UPDATE, response));
assertFalse(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0));
}
public void testExecuteBulkIndexRequest() throws Exception {
IndexMetaData metaData = indexMetaData();
IndexShard shard = newStartedShard(true);
BulkItemRequest[] items = new BulkItemRequest[1];
boolean create = randomBoolean();
DocWriteRequest writeRequest = new IndexRequest("index", "type", "id")
.source(Requests.INDEX_CONTENT_TYPE, "foo", "bar")
.create(create);
BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest);
items[0] = primaryRequest;
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
Translog.Location location = new Translog.Location(0, 0, 0);
UpdateHelper updateHelper = null;
Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest,
location, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer());
// Translog should change, since there were no problems
assertThat(newLocation, not(location));
BulkItemResponse primaryResponse = bulkShardRequest.items()[0].getPrimaryResponse();
assertThat(primaryResponse.getItemId(), equalTo(0));
assertThat(primaryResponse.getId(), equalTo("id"));
assertThat(primaryResponse.getOpType(), equalTo(create ? DocWriteRequest.OpType.CREATE : DocWriteRequest.OpType.INDEX));
assertFalse(primaryResponse.isFailed());
// Assert that the document actually made it there
assertDocCount(shard, 1);
writeRequest = new IndexRequest("index", "type", "id")
.source(Requests.INDEX_CONTENT_TYPE, "foo", "bar")
.create(true);
primaryRequest = new BulkItemRequest(0, writeRequest);
items[0] = primaryRequest;
bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
Translog.Location secondLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest,
newLocation, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer());
// Translog should not change, since the document was not indexed due to a version conflict
assertThat(secondLocation, equalTo(newLocation));
BulkItemRequest replicaRequest = bulkShardRequest.items()[0];
primaryResponse = bulkShardRequest.items()[0].getPrimaryResponse();
assertThat(primaryResponse.getItemId(), equalTo(0));
assertThat(primaryResponse.getId(), equalTo("id"));
assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.CREATE));
// Should be failed since the document already exists
assertTrue(primaryResponse.isFailed());
BulkItemResponse.Failure failure = primaryResponse.getFailure();
assertThat(failure.getIndex(), equalTo("index"));
assertThat(failure.getType(), equalTo("type"));
assertThat(failure.getId(), equalTo("id"));
assertThat(failure.getCause().getClass(), equalTo(VersionConflictEngineException.class));
assertThat(failure.getCause().getMessage(),
containsString("version conflict, document already exists (current version [1])"));
assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT));
assertThat(replicaRequest, equalTo(primaryRequest));
// Assert that the document count is still 1
assertDocCount(shard, 1);
closeShards(shard);
}
public void testExecuteBulkIndexRequestWithRejection() throws Exception {
IndexMetaData metaData = indexMetaData();
IndexShard shard = newStartedShard(true);
BulkItemRequest[] items = new BulkItemRequest[1];
DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
items[0] = new BulkItemRequest(0, writeRequest);
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
Translog.Location location = new Translog.Location(0, 0, 0);
UpdateHelper updateHelper = null;
// Pretend the mappings haven't made it to the node yet, and throw a rejection
Exception err = new ReplicationOperation.RetryOnPrimaryException(shardId, "rejection");
try {
TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, location,
0, updateHelper, threadPool::absoluteTimeInMillis, new ThrowingMappingUpdatePerformer(err));
fail("should have thrown a retry exception");
} catch (ReplicationOperation.RetryOnPrimaryException e) {
assertThat(e, equalTo(err));
}
closeShards(shard);
}
public void testExecuteBulkIndexRequestWithConflictingMappings() throws Exception {
IndexMetaData metaData = indexMetaData();
IndexShard shard = newStartedShard(true);
BulkItemRequest[] items = new BulkItemRequest[1];
DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
items[0] = new BulkItemRequest(0, writeRequest);
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
Translog.Location location = new Translog.Location(0, 0, 0);
UpdateHelper updateHelper = null;
// Return a mapping conflict (IAE) when trying to update the mapping
Exception err = new IllegalArgumentException("mapping conflict");
Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest,
location, 0, updateHelper, threadPool::absoluteTimeInMillis, new FailingMappingUpdatePerformer(err));
// Translog shouldn't change, as there were conflicting mappings
assertThat(newLocation, equalTo(location));
BulkItemResponse primaryResponse = bulkShardRequest.items()[0].getPrimaryResponse();
// Since this was not a conflict failure, the primary response
// should be filled out with the failure information
assertThat(primaryResponse.getItemId(), equalTo(0));
assertThat(primaryResponse.getId(), equalTo("id"));
assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX));
assertTrue(primaryResponse.isFailed());
assertThat(primaryResponse.getFailureMessage(), containsString("mapping conflict"));
BulkItemResponse.Failure failure = primaryResponse.getFailure();
assertThat(failure.getIndex(), equalTo("index"));
assertThat(failure.getType(), equalTo("type"));
assertThat(failure.getId(), equalTo("id"));
assertThat(failure.getCause(), equalTo(err));
assertThat(failure.getStatus(), equalTo(RestStatus.BAD_REQUEST));
closeShards(shard);
}
public void testExecuteBulkDeleteRequest() throws Exception {
IndexMetaData metaData = indexMetaData();
IndexShard shard = newStartedShard(true);
BulkItemRequest[] items = new BulkItemRequest[1];
DocWriteRequest writeRequest = new DeleteRequest("index", "type", "id");
items[0] = new BulkItemRequest(0, writeRequest);
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
Translog.Location location = new Translog.Location(0, 0, 0);
UpdateHelper updateHelper = null;
Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest,
location, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer());
// Translog changes, even though the document didn't exist
assertThat(newLocation, not(location));
BulkItemRequest replicaRequest = bulkShardRequest.items()[0];
DocWriteRequest replicaDeleteRequest = replicaRequest.request();
BulkItemResponse primaryResponse = replicaRequest.getPrimaryResponse();
DeleteResponse response = primaryResponse.getResponse();
// Any version can be matched on replica
assertThat(replicaDeleteRequest.version(), equalTo(Versions.MATCH_ANY));
assertThat(replicaDeleteRequest.versionType(), equalTo(VersionType.INTERNAL));
assertThat(primaryResponse.getItemId(), equalTo(0));
assertThat(primaryResponse.getId(), equalTo("id"));
assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.DELETE));
assertFalse(primaryResponse.isFailed());
assertThat(response.getResult(), equalTo(DocWriteResponse.Result.NOT_FOUND));
assertThat(response.getShardId(), equalTo(shard.shardId()));
assertThat(response.getIndex(), equalTo("index"));
assertThat(response.getType(), equalTo("type"));
assertThat(response.getId(), equalTo("id"));
assertThat(response.getVersion(), equalTo(1L));
assertThat(response.getSeqNo(), equalTo(0L));
assertThat(response.forcedRefresh(), equalTo(false));
// Now do the same after indexing the document, it should now find and delete the document
indexDoc(shard, "type", "id", "{\"foo\": \"bar\"}");
writeRequest = new DeleteRequest("index", "type", "id");
items[0] = new BulkItemRequest(0, writeRequest);
bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
location = newLocation;
newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest,
location, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer());
// Translog changes, because the document was deleted
assertThat(newLocation, not(location));
replicaRequest = bulkShardRequest.items()[0];
replicaDeleteRequest = replicaRequest.request();
primaryResponse = replicaRequest.getPrimaryResponse();
response = primaryResponse.getResponse();
// Any version can be matched on replica
assertThat(replicaDeleteRequest.version(), equalTo(Versions.MATCH_ANY));
assertThat(replicaDeleteRequest.versionType(), equalTo(VersionType.INTERNAL));
assertThat(primaryResponse.getItemId(), equalTo(0));
assertThat(primaryResponse.getId(), equalTo("id"));
assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.DELETE));
assertFalse(primaryResponse.isFailed());
assertThat(response.getResult(), equalTo(DocWriteResponse.Result.DELETED));
assertThat(response.getShardId(), equalTo(shard.shardId()));
assertThat(response.getIndex(), equalTo("index"));
assertThat(response.getType(), equalTo("type"));
assertThat(response.getId(), equalTo("id"));
assertThat(response.getVersion(), equalTo(3L));
assertThat(response.getSeqNo(), equalTo(2L));
assertThat(response.forcedRefresh(), equalTo(false));
assertDocCount(shard, 0);
closeShards(shard);
}
public void testNoopUpdateReplicaRequest() throws Exception {
DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value");
BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);
DocWriteResponse noopUpdateResponse = new UpdateResponse(shardId, "index", "id", 0, DocWriteResponse.Result.NOOP);
BulkItemResultHolder noopResults = new BulkItemResultHolder(noopUpdateResponse, null, replicaRequest);
Translog.Location location = new Translog.Location(0, 0, 0);
BulkItemRequest[] items = new BulkItemRequest[0];
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
Translog.Location newLocation = TransportShardBulkAction.updateReplicaRequest(noopResults,
DocWriteRequest.OpType.UPDATE, location, bulkShardRequest);
BulkItemResponse primaryResponse = replicaRequest.getPrimaryResponse();
// Basically nothing changes in the request since it's a noop
assertThat(newLocation, equalTo(location));
assertThat(primaryResponse.getItemId(), equalTo(0));
assertThat(primaryResponse.getId(), equalTo("id"));
assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.UPDATE));
assertThat(primaryResponse.getResponse(), equalTo(noopUpdateResponse));
assertThat(primaryResponse.getResponse().getResult(), equalTo(DocWriteResponse.Result.NOOP));
}
public void testUpdateReplicaRequestWithFailure() throws Exception {
DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value");
BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);
Exception err = new ElasticsearchException("I'm dead <(x.x)>");
Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0);
BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult, replicaRequest);
Translog.Location location = new Translog.Location(0, 0, 0);
BulkItemRequest[] items = new BulkItemRequest[0];
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
Translog.Location newLocation = TransportShardBulkAction.updateReplicaRequest(failedResults,
DocWriteRequest.OpType.UPDATE, location, bulkShardRequest);
BulkItemResponse primaryResponse = replicaRequest.getPrimaryResponse();
// Since this was not a conflict failure, the primary response
// should be filled out with the failure information
assertThat(newLocation, equalTo(location));
assertThat(primaryResponse.getItemId(), equalTo(0));
assertThat(primaryResponse.getId(), equalTo("id"));
assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX));
assertTrue(primaryResponse.isFailed());
assertThat(primaryResponse.getFailureMessage(), containsString("I'm dead <(x.x)>"));
BulkItemResponse.Failure failure = primaryResponse.getFailure();
assertThat(failure.getIndex(), equalTo("index"));
assertThat(failure.getType(), equalTo("type"));
assertThat(failure.getId(), equalTo("id"));
assertThat(failure.getCause(), equalTo(err));
assertThat(failure.getStatus(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
}
public void testUpdateReplicaRequestWithConflictFailure() throws Exception {
DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value");
BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);
Exception err = new VersionConflictEngineException(shardId, "type", "id", "I'm conflicted <(;_;)>");
Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0);
BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult, replicaRequest);
Translog.Location location = new Translog.Location(0, 0, 0);
BulkItemRequest[] items = new BulkItemRequest[0];
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
Translog.Location newLocation = TransportShardBulkAction.updateReplicaRequest(failedResults,
DocWriteRequest.OpType.UPDATE, location, bulkShardRequest);
BulkItemResponse primaryResponse = replicaRequest.getPrimaryResponse();
// Since this was a conflict failure, the primary response should still
// be filled out with the failure information, with a CONFLICT status
assertThat(newLocation, equalTo(location));
assertThat(primaryResponse.getItemId(), equalTo(0));
assertThat(primaryResponse.getId(), equalTo("id"));
assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX));
assertTrue(primaryResponse.isFailed());
assertThat(primaryResponse.getFailureMessage(), containsString("I'm conflicted <(;_;)>"));
BulkItemResponse.Failure failure = primaryResponse.getFailure();
assertThat(failure.getIndex(), equalTo("index"));
assertThat(failure.getType(), equalTo("type"));
assertThat(failure.getId(), equalTo("id"));
assertThat(failure.getCause(), equalTo(err));
assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT));
}
public void testUpdateReplicaRequestWithSuccess() throws Exception {
DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value");
BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);
boolean created = randomBoolean();
Translog.Location resultLocation = new Translog.Location(42, 42, 42);
Engine.IndexResult indexResult = new FakeResult(1, 1, created, resultLocation);
DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 1, created);
BulkItemResultHolder goodResults = new BulkItemResultHolder(indexResponse, indexResult, replicaRequest);
Translog.Location originalLocation = new Translog.Location(21, 21, 21);
BulkItemRequest[] items = new BulkItemRequest[0];
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items);
Translog.Location newLocation = TransportShardBulkAction.updateReplicaRequest(goodResults,
DocWriteRequest.OpType.INDEX, originalLocation, bulkShardRequest);
BulkItemResponse primaryResponse = replicaRequest.getPrimaryResponse();
// Check that the translog is successfully advanced
assertThat(newLocation, equalTo(resultLocation));
// Since the operation succeeded, the primary response
// should be filled out with the operation's response
assertThat(primaryResponse.getItemId(), equalTo(0));
assertThat(primaryResponse.getId(), equalTo("id"));
assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX));
DocWriteResponse response = primaryResponse.getResponse();
assertThat(response.status(), equalTo(created ? RestStatus.CREATED : RestStatus.OK));
}
/**
* Fake IndexResult that has a settable translog location
*/
private static class FakeResult extends Engine.IndexResult {
private final Translog.Location location;
protected FakeResult(long version, long seqNo, boolean created, Translog.Location location) {
super(version, seqNo, created);
this.location = location;
}
@Override
public Translog.Location getTranslogLocation() {
return this.location;
}
}
/** Doesn't perform any mapping updates */
public static class NoopMappingUpdatePerformer implements MappingUpdatePerformer {
public MappingUpdatePerformer.MappingUpdateResult updateMappingsIfNeeded(IndexShard primary,
IndexRequest request) throws Exception {
Engine.Index operation = TransportShardBulkAction.prepareIndexOperationOnPrimary(request, primary);
return new MappingUpdatePerformer.MappingUpdateResult(operation);
}
}
/** Always returns the given failure */
private class FailingMappingUpdatePerformer implements MappingUpdatePerformer {
private final Exception e;
FailingMappingUpdatePerformer(Exception e) {
this.e = e;
}
public MappingUpdatePerformer.MappingUpdateResult updateMappingsIfNeeded(IndexShard primary,
IndexRequest request) throws Exception {
return new MappingUpdatePerformer.MappingUpdateResult(e);
}
}
/** Always throw the given exception */
private class ThrowingMappingUpdatePerformer implements MappingUpdatePerformer {
private final Exception e;
ThrowingMappingUpdatePerformer(Exception e) {
this.e = e;
}
public MappingUpdatePerformer.MappingUpdateResult updateMappingsIfNeeded(IndexShard primary,
IndexRequest request) throws Exception {
throw e;
}
}
}
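For orientation, a minimal standalone sketch (hypothetical names, not the Elasticsearch API) of the translog bookkeeping the updateReplicaRequest tests above assert: failed and noop items leave the location unchanged, successful writes advance it.

// Hypothetical sketch, not Elasticsearch code: the location bookkeeping the
// tests above assert.
class TranslogLocationSketch {
    static long nextLocation(long current, Long resultLocation, boolean failed) {
        if (failed || resultLocation == null) {
            return current; // failures and noops do not advance the translog
        }
        return resultLocation; // successful writes advance it
    }
    public static void main(String[] args) {
        System.out.println(nextLocation(21, null, true));  // 21: failure
        System.out.println(nextLocation(21, null, false)); // 21: noop
        System.out.println(nextLocation(21, 42L, false));  // 42: success
    }
}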

View File

@ -35,6 +35,7 @@ import java.io.IOException;
import java.util.Date;
import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
public class MainResponseTests extends ESTestCase {
@ -89,4 +90,44 @@ public class MainResponseTests extends ESTestCase {
+ "}", builder.string());
}
public void testEqualsAndHashcode() {
MainResponse original = createTestItem();
checkEqualsAndHashCode(original, MainResponseTests::copy, MainResponseTests::mutate);
}
private static MainResponse copy(MainResponse o) {
return new MainResponse(o.getNodeName(), o.getVersion(), o.getClusterName(), o.getClusterUuid(), o.getBuild(), o.isAvailable());
}
private static MainResponse mutate(MainResponse o) {
String clusterUuid = o.getClusterUuid();
boolean available = o.isAvailable();
Build build = o.getBuild();
Version version = o.getVersion();
String nodeName = o.getNodeName();
ClusterName clusterName = o.getClusterName();
switch (randomIntBetween(0, 5)) {
case 0:
clusterUuid = clusterUuid + randomAsciiOfLength(5);
break;
case 1:
nodeName = nodeName + randomAsciiOfLength(5);
break;
case 2:
available = !available;
break;
case 3:
// toggle the snapshot flag of the original Build parameter
build = new Build(build.shortHash(), build.date(), !build.isSnapshot());
break;
case 4:
version = randomValueOtherThan(version, () -> VersionUtils.randomVersion(random()));
break;
case 5:
clusterName = new ClusterName(clusterName + randomAsciiOfLength(5));
break;
}
return new MainResponse(nodeName, version, clusterName, clusterUuid, build, available);
}
}
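The copy/mutate pair above follows the usual equals/hashCode testing contract; a minimal generic sketch of that contract (hypothetical helper, not the test framework's implementation):

import java.util.function.UnaryOperator;

// Hypothetical sketch of the contract checkEqualsAndHashCode relies on:
// copy(o) must be equal but a distinct instance, mutate(o) must differ.
class EqualsContractSketch {
    static <T> void check(T original, UnaryOperator<T> copy, UnaryOperator<T> mutate) {
        T copied = copy.apply(original);
        if (!copied.equals(original) || copied == original) {
            throw new AssertionError("copy must be equal but not the same instance");
        }
        if (copied.hashCode() != original.hashCode()) {
            throw new AssertionError("equal objects must share a hash code");
        }
        if (mutate.apply(original).equals(original)) {
            throw new AssertionError("mutate must change at least one field");
        }
    }
    public static void main(String[] args) {
        check("abc", s -> new String(s), s -> s + "!");
        System.out.println("contract holds");
    }
}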

View File

@ -21,6 +21,7 @@ package org.elasticsearch.action.search;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.util.BigArrays;
@ -347,4 +348,31 @@ public class SearchPhaseControllerTests extends ESTestCase {
}
}
}
public void testFillTopDocs() {
final int maxIters = randomIntBetween(5, 15);
for (int iters = 0; iters < maxIters; iters++) {
TopDocs[] topDocs = new TopDocs[randomIntBetween(2, 100)];
int numShards = topDocs.length;
AtomicArray<QuerySearchResultProvider> resultProviderAtomicArray = generateQueryResults(numShards, Collections.emptyList(),
2, randomBoolean());
if (randomBoolean()) {
int maxNull = randomIntBetween(1, topDocs.length - 1);
for (int i = 0; i < maxNull; i++) {
resultProviderAtomicArray.set(randomIntBetween(0, resultProviderAtomicArray.length() - 1), null);
}
}
SearchPhaseController.fillTopDocs(topDocs, resultProviderAtomicArray.asList(), Lucene.EMPTY_TOP_DOCS);
for (int i = 0; i < topDocs.length; i++) {
assertNotNull(topDocs[i]);
if (topDocs[i] == Lucene.EMPTY_TOP_DOCS) {
assertNull(resultProviderAtomicArray.get(i));
} else {
assertNotNull(resultProviderAtomicArray.get(i));
assertNotNull(resultProviderAtomicArray.get(i).queryResult());
assertSame(resultProviderAtomicArray.get(i).queryResult().topDocs(), topDocs[i]);
}
}
}
}
}
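A compact sketch of the fill semantics the loop above checks, with placeholder types standing in for the Lucene and search classes:

// Hypothetical sketch: each slot receives the shard's topDocs, or the shared
// empty sentinel when that shard produced no result.
class FillTopDocsSketch {
    interface Result { Object topDocs(); }
    static Object[] fill(Result[] results, Object emptySentinel) {
        Object[] slots = new Object[results.length];
        for (int i = 0; i < results.length; i++) {
            slots[i] = results[i] == null ? emptySentinel : results[i].topDocs();
        }
        return slots;
    }
    public static void main(String[] args) {
        Object empty = new Object();
        Object[] slots = fill(new Result[] { () -> "docs", null }, empty);
        System.out.println(slots[0] + " " + (slots[1] == empty)); // docs true
    }
}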

View File

@ -52,6 +52,8 @@ public class TerminalTests extends ESTestCase {
assertTrue(terminal.promptYesNo("Answer?", true));
terminal.addTextInput("");
assertFalse(terminal.promptYesNo("Answer?", false));
terminal.addTextInput(null);
assertFalse(terminal.promptYesNo("Answer?", false));
}
public void testPromptYesNoReprompt() throws Exception {

View File

@ -24,16 +24,15 @@ import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.transport.MockTransportClient;
import org.elasticsearch.transport.TransportService;
import org.hamcrest.Matchers;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import static org.elasticsearch.client.transport.TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL;
public class NodeDisconnectIT extends ESIntegTestCase {
public void testNotifyOnDisconnect() throws IOException {
@ -41,7 +40,10 @@ public class NodeDisconnectIT extends ESIntegTestCase {
final Set<DiscoveryNode> disconnectedNodes = Collections.synchronizedSet(new HashSet<>());
try (TransportClient client = new MockTransportClient(Settings.builder()
.put("cluster.name", internalCluster().getClusterName()).build(), Collections.emptySet(), (n, e) -> disconnectedNodes.add(n))) {
.put("cluster.name", internalCluster().getClusterName())
.put(CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.getKey(), "1h") // disable sniffing for better control
.build(),
Collections.emptySet(), (n, e) -> disconnectedNodes.add(n))) {
for (TransportService service : internalCluster().getInstances(TransportService.class)) {
client.addTransportAddress(service.boundAddress().publishAddress());
}

View File

@ -56,11 +56,12 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
public class FilterAllocationDeciderTests extends ESAllocationTestCase {
public void testFilterInitialRecovery() {
FilterAllocationDecider filterAllocationDecider = new FilterAllocationDecider(Settings.EMPTY,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
FilterAllocationDecider filterAllocationDecider = new FilterAllocationDecider(Settings.EMPTY, clusterSettings);
AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY,
Arrays.asList(filterAllocationDecider,
new SameShardAllocationDecider(Settings.EMPTY), new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY)));
new SameShardAllocationDecider(Settings.EMPTY, clusterSettings),
new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY)));
AllocationService service = new AllocationService(Settings.builder().build(), allocationDeciders,
new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
ClusterState state = createInitialClusterState(service, Settings.builder().put("index.routing.allocation.initial_recovery._id",

View File

@ -37,6 +37,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.gateway.TestGatewayAllocator;
import org.hamcrest.Matchers;
@ -58,8 +59,9 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {
public void testRandomDecisions() {
RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(random());
AllocationService strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY), new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY),
randomAllocationDecider))), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY),
randomAllocationDecider))), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
int indices = scaledRandomIntBetween(1, 20);
Builder metaBuilder = MetaData.builder();
int maxNumReplicas = 1;

View File

@ -40,6 +40,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
@ -98,7 +99,8 @@ public class SameShardRoutingTests extends ESAllocationTestCase {
}
public void testForceAllocatePrimaryOnSameNodeNotAllowed() {
SameShardAllocationDecider decider = new SameShardAllocationDecider(Settings.EMPTY);
SameShardAllocationDecider decider = new SameShardAllocationDecider(
Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomIntBetween(2, 4), 1);
Index index = clusterState.getMetaData().index("idx").getIndex();
ShardRouting primaryShard = clusterState.routingTable().index(index).shard(0).primaryShard();
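The extra ClusterSettings constructor argument recurring across these diffs is consistent with the decider registering for dynamic updates of the same-host setting; a hedged sketch of that pattern (hypothetical wrapper class, assuming the standard addSettingsUpdateConsumer API):

import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

// Hypothetical sketch: subscribe to the same-host setting so later
// cluster-settings updates take effect without a restart.
class SameHostTrackerSketch {
    private volatile boolean sameHost;
    SameHostTrackerSketch(Settings settings, ClusterSettings clusterSettings) {
        this.sameHost = SameShardAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.get(settings);
        clusterSettings.addSettingsUpdateConsumer(
                SameShardAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING,
                updated -> this.sameHost = updated);
    }
    boolean sameHost() {
        return sameHost;
    }
}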

View File

@ -92,9 +92,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
new HashSet<>(Arrays.asList(
new SameShardAllocationDecider(Settings.EMPTY),
new SameShardAllocationDecider(Settings.EMPTY, clusterSettings),
makeDecider(diskSettings))));
ClusterInfoService cis = new ClusterInfoService() {
@ -187,7 +188,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
deciders = new AllocationDeciders(Settings.EMPTY,
new HashSet<>(Arrays.asList(
new SameShardAllocationDecider(Settings.EMPTY),
new SameShardAllocationDecider(Settings.EMPTY, clusterSettings),
makeDecider(diskSettings))));
strategy = new AllocationService(Settings.builder()
@ -217,7 +218,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
deciders = new AllocationDeciders(Settings.EMPTY,
new HashSet<>(Arrays.asList(
new SameShardAllocationDecider(Settings.EMPTY),
new SameShardAllocationDecider(Settings.EMPTY, clusterSettings),
makeDecider(diskSettings))));
strategy = new AllocationService(Settings.builder()
@ -279,9 +280,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
new HashSet<>(Arrays.asList(
new SameShardAllocationDecider(Settings.EMPTY),
new SameShardAllocationDecider(Settings.EMPTY, clusterSettings),
makeDecider(diskSettings))));
ClusterInfoService cis = new ClusterInfoService() {
@ -414,7 +416,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
deciders = new AllocationDeciders(Settings.EMPTY,
new HashSet<>(Arrays.asList(
new SameShardAllocationDecider(Settings.EMPTY),
new SameShardAllocationDecider(Settings.EMPTY, clusterSettings),
makeDecider(diskSettings))));
strategy = new AllocationService(Settings.builder()
@ -444,7 +446,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
deciders = new AllocationDeciders(Settings.EMPTY,
new HashSet<>(Arrays.asList(
new SameShardAllocationDecider(Settings.EMPTY),
new SameShardAllocationDecider(Settings.EMPTY, clusterSettings),
makeDecider(diskSettings))));
strategy = new AllocationService(Settings.builder()
@ -535,7 +537,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
new HashSet<>(Arrays.asList(
new SameShardAllocationDecider(Settings.EMPTY),
new SameShardAllocationDecider(
Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
),
makeDecider(diskSettings))));
ClusterInfoService cis = new ClusterInfoService() {
@ -605,7 +609,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
new HashSet<>(Arrays.asList(
new SameShardAllocationDecider(Settings.EMPTY),
new SameShardAllocationDecider(
Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
),
makeDecider(diskSettings))));
ClusterInfoService cis = new ClusterInfoService() {
@ -710,7 +716,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
DiskThresholdDecider decider = makeDecider(diskSettings);
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY), decider)));
new HashSet<>(Arrays.asList(new SameShardAllocationDecider(
Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
), decider)));
ClusterInfoService cis = new ClusterInfoService() {
@Override
@ -913,7 +921,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
}
};
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(
new SameShardAllocationDecider(Settings.EMPTY), diskThresholdDecider
new SameShardAllocationDecider(
Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
),
diskThresholdDecider
)));
AllocationService strategy = new AllocationService(Settings.builder()
.put("cluster.routing.allocation.node_concurrent_recoveries", 10)
@ -1011,7 +1022,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
};
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(
new SameShardAllocationDecider(Settings.EMPTY), diskThresholdDecider
new SameShardAllocationDecider(
Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
),
diskThresholdDecider
)));
AllocationService strategy = new AllocationService(Settings.builder()

View File

@ -1,77 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.Set;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
/**
* Simple integration test for {@link EnableAllocationDecider}; there is a more exhaustive unit test in
* {@link EnableAllocationTests}. This test is meant to check that the actual update of the settings
* works as expected.
*/
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
public class EnableAllocationDeciderIT extends ESIntegTestCase {
public void testEnableRebalance() throws InterruptedException {
final String firstNode = internalCluster().startNode();
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)).get();
// we test with 2 shards since otherwise it's pretty fragile if there are differences in the number of shards such that
// all shards are relocated to the second node, which is not what we want here. It's solely a test for the settings to take effect
final int numShards = 2;
assertAcked(prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards)));
assertAcked(prepareCreate("test_1").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards)));
ensureGreen();
assertAllShardsOnNodes("test", firstNode);
assertAllShardsOnNodes("test_1", firstNode);
final String secondNode = internalCluster().startNode();
// prevent rebalancing via an index setting, but only on index [test]
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)).get();
client().admin().cluster().prepareReroute().get();
ensureGreen();
assertAllShardsOnNodes("test", firstNode);
assertAllShardsOnNodes("test_1", firstNode);
// now enable the index test to relocate since index settings override cluster settings
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get();
logger.info("--> balance index [test]");
client().admin().cluster().prepareReroute().get();
ensureGreen("test");
Set<String> test = assertAllShardsOnNodes("test", firstNode, secondNode);
assertThat("index: [test] expected to be rebalanced on both nodes", test.size(), equalTo(2));
// flip the cluster-wide setting such that we can also balance index test_1; eventually we should have one shard of each index on each node
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get();
logger.info("--> balance index [test_1]");
client().admin().cluster().prepareReroute().get();
ensureGreen("test_1");
Set<String> test_1 = assertAllShardsOnNodes("test_1", firstNode, secondNode);
assertThat("index: [test_1] expected to be rebalanced on both nodes", test_1.size(), equalTo(2));
test = assertAllShardsOnNodes("test", firstNode, secondNode);
assertThat("index: [test] expected to be rebalanced on both nodes", test.size(), equalTo(2));
}
}

View File

@ -0,0 +1,123 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.Set;
import static org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
/**
* An integration test for updating shard allocation/routing settings and
* ensuring the updated settings take effect as expected.
*/
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
public class UpdateShardAllocationSettingsIT extends ESIntegTestCase {
/**
* Tests that updating the {@link EnableAllocationDecider} related settings works as expected.
*/
public void testEnableRebalance() throws InterruptedException {
final String firstNode = internalCluster().startNode();
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE))
.get();
// we test with 2 shards since otherwise it's pretty fragile if there are differences in the number of shards such that
// all shards are relocated to the second node, which is not what we want here. It's solely a test for the settings to take effect
final int numShards = 2;
assertAcked(prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards)));
assertAcked(prepareCreate("test_1").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards)));
ensureGreen();
assertAllShardsOnNodes("test", firstNode);
assertAllShardsOnNodes("test_1", firstNode);
final String secondNode = internalCluster().startNode();
// prevent rebalancing via an index setting, but only on index [test]
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
.put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE))
.get();
client().admin().cluster().prepareReroute().get();
ensureGreen();
assertAllShardsOnNodes("test", firstNode);
assertAllShardsOnNodes("test_1", firstNode);
// now enable the index test to relocate since index settings override cluster settings
client().admin().indices().prepareUpdateSettings("test")
.setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL))
.get();
logger.info("--> balance index [test]");
client().admin().cluster().prepareReroute().get();
ensureGreen("test");
Set<String> test = assertAllShardsOnNodes("test", firstNode, secondNode);
assertThat("index: [test] expected to be rebalanced on both nodes", test.size(), equalTo(2));
// flip the cluster-wide setting such that we can also balance index
// test_1; eventually we should have one shard of each index on each node
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL))
.get();
logger.info("--> balance index [test_1]");
client().admin().cluster().prepareReroute().get();
ensureGreen("test_1");
Set<String> test_1 = assertAllShardsOnNodes("test_1", firstNode, secondNode);
assertThat("index: [test_1] expected to be rebalanced on both nodes", test_1.size(), equalTo(2));
test = assertAllShardsOnNodes("test", firstNode, secondNode);
assertThat("index: [test] expected to be rebalanced on both nodes", test.size(), equalTo(2));
}
/**
* Tests that updating the {@link SameShardAllocationDecider#CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING} setting works as expected.
*/
public void testUpdateSameHostSetting() {
internalCluster().startNodes(2);
// set same_host to true; since 2 nodes are started on the same host,
// only primaries should be assigned
client().admin().cluster().prepareUpdateSettings().setTransientSettings(
Settings.builder().put(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), true)
).get();
final String indexName = "idx";
client().admin().indices().prepareCreate(indexName).setSettings(
Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
).get();
ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
assertFalse("replica should be unassigned",
clusterState.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).isEmpty());
// now, update the same_host setting to allow shards to be allocated to multiple nodes on
// the same host - the replica should get assigned
client().admin().cluster().prepareUpdateSettings().setTransientSettings(
Settings.builder().put(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), false)
).get();
clusterState = client().admin().cluster().prepareState().get().getState();
assertTrue("all shards should be assigned",
clusterState.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).isEmpty());
}
}

View File

@ -18,9 +18,11 @@
*/
package org.elasticsearch.common.logging;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.hamcrest.RegexMatcher;
import java.io.IOException;
import java.util.Collections;
@ -28,17 +30,22 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.IntStream;
import static org.hamcrest.Matchers.not;
import static org.elasticsearch.common.logging.DeprecationLogger.WARNING_HEADER_PATTERN;
import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.not;
/**
* Tests {@link DeprecationLogger}
*/
public class DeprecationLoggerTests extends ESTestCase {
private static final RegexMatcher warningValueMatcher = matches(WARNING_HEADER_PATTERN.pattern());
private final DeprecationLogger logger = new DeprecationLogger(Loggers.getLogger(getClass()));
@Override
@ -48,43 +55,42 @@ public class DeprecationLoggerTests extends ESTestCase {
}
public void testAddsHeaderWithThreadContext() throws IOException {
String msg = "A simple message [{}]";
String param = randomAsciiOfLengthBetween(1, 5);
String formatted = LoggerMessageFormat.format(msg, (Object)param);
try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) {
Set<ThreadContext> threadContexts = Collections.singleton(threadContext);
final Set<ThreadContext> threadContexts = Collections.singleton(threadContext);
logger.deprecated(threadContexts, msg, param);
final String param = randomAsciiOfLengthBetween(1, 5);
logger.deprecated(threadContexts, "A simple message [{}]", param);
Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders();
final Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders();
assertEquals(1, responseHeaders.size());
assertEquals(formatted, responseHeaders.get(DeprecationLogger.WARNING_HEADER).get(0));
assertThat(responseHeaders.size(), equalTo(1));
final List<String> responses = responseHeaders.get("Warning");
assertThat(responses, hasSize(1));
assertThat(responses.get(0), warningValueMatcher);
assertThat(responses.get(0), containsString("\"A simple message [" + param + "]\""));
}
}
public void testAddsCombinedHeaderWithThreadContext() throws IOException {
String msg = "A simple message [{}]";
String param = randomAsciiOfLengthBetween(1, 5);
String formatted = LoggerMessageFormat.format(msg, (Object)param);
String formatted2 = randomAsciiOfLengthBetween(1, 10);
try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) {
Set<ThreadContext> threadContexts = Collections.singleton(threadContext);
final Set<ThreadContext> threadContexts = Collections.singleton(threadContext);
logger.deprecated(threadContexts, msg, param);
logger.deprecated(threadContexts, formatted2);
final String param = randomAsciiOfLengthBetween(1, 5);
logger.deprecated(threadContexts, "A simple message [{}]", param);
final String second = randomAsciiOfLengthBetween(1, 10);
logger.deprecated(threadContexts, second);
Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders();
final Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders();
assertEquals(1, responseHeaders.size());
List<String> responses = responseHeaders.get(DeprecationLogger.WARNING_HEADER);
final List<String> responses = responseHeaders.get("Warning");
assertEquals(2, responses.size());
assertEquals(formatted, responses.get(0));
assertEquals(formatted2, responses.get(1));
assertThat(responses.get(0), warningValueMatcher);
assertThat(responses.get(0), containsString("\"A simple message [" + param + "]\""));
assertThat(responses.get(1), warningValueMatcher);
assertThat(responses.get(1), containsString("\"" + second + "\""));
}
}
@ -93,28 +99,30 @@ public class DeprecationLoggerTests extends ESTestCase {
final String unexpected = "testCannotRemoveThreadContext";
try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) {
// NOTE: by adding it to the logger, we allow any concurrent test to write to it (from their own threads)
DeprecationLogger.setThreadContext(threadContext);
logger.deprecated(expected);
Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders();
List<String> responses = responseHeaders.get(DeprecationLogger.WARNING_HEADER);
{
final Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders();
final List<String> responses = responseHeaders.get("Warning");
// ensure it works (note: concurrent tests may be adding to it, but in different threads, so it should have no impact)
assertThat(responses, hasSize(atLeast(1)));
assertThat(responses, hasItem(equalTo(expected)));
assertThat(responses, hasSize(1));
assertThat(responses.get(0), warningValueMatcher);
assertThat(responses.get(0), containsString(expected));
}
DeprecationLogger.removeThreadContext(threadContext);
logger.deprecated(unexpected);
responseHeaders = threadContext.getResponseHeaders();
responses = responseHeaders.get(DeprecationLogger.WARNING_HEADER);
{
final Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders();
final List<String> responses = responseHeaders.get("Warning");
assertThat(responses, hasSize(atLeast(1)));
assertThat(responses, hasItem(expected));
assertThat(responses, not(hasItem(unexpected)));
assertThat(responses, hasSize(1));
assertThat(responses.get(0), warningValueMatcher);
assertThat(responses.get(0), containsString(expected));
assertThat(responses.get(0), not(containsString(unexpected)));
}
}
}
@ -158,4 +166,28 @@ public class DeprecationLoggerTests extends ESTestCase {
}
}
public void testWarningValueFromWarningHeader() throws InterruptedException {
final String s = randomAsciiOfLength(16);
final String first = DeprecationLogger.formatWarning(s);
assertThat(DeprecationLogger.extractWarningValueFromWarningHeader(first), equalTo(s));
}
public void testEscape() {
assertThat(DeprecationLogger.escape("\\"), equalTo("\\\\"));
assertThat(DeprecationLogger.escape("\""), equalTo("\\\""));
assertThat(DeprecationLogger.escape("\\\""), equalTo("\\\\\\\""));
assertThat(DeprecationLogger.escape("\"foo\\bar\""),equalTo("\\\"foo\\\\bar\\\""));
// test that characters other than '\' and '"' are left unchanged
String chars = "\t !" + range(0x23, 0x5b) + range(0x5d, 0x73) + range(0x80, 0xff);
final String s = new CodepointSetGenerator(chars.toCharArray()).ofCodePointsLength(random(), 16, 16);
assertThat(DeprecationLogger.escape(s), equalTo(s));
}
private String range(int lowerInclusive, int upperInclusive) {
return IntStream
.range(lowerInclusive, upperInclusive + 1)
.collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append)
.toString();
}
}
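The escape assertions above fully pin down the quoting rule; a standalone sketch (hypothetical method name) that satisfies them:

// Hypothetical sketch matching testEscape: backslash-escape '"' and '\',
// leave every other character unchanged.
class WarningEscapeSketch {
    static String escapeWarningValue(String s) {
        StringBuilder sb = new StringBuilder(s.length());
        for (int i = 0; i < s.length(); i++) {
            char c = s.charAt(i);
            if (c == '\\' || c == '"') {
                sb.append('\\');
            }
            sb.append(c);
        }
        return sb.toString();
    }
    public static void main(String[] args) {
        System.out.println(escapeWarningValue("\"foo\\bar\"")); // \"foo\\bar\"
    }
}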

View File

@ -19,6 +19,7 @@
package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
@ -178,6 +179,14 @@ public class ThreadContextTests extends ESTestCase {
threadContext.addResponseHeader("foo", "bar");
}
final String value = DeprecationLogger.formatWarning("qux");
threadContext.addResponseHeader("baz", value, DeprecationLogger::extractWarningValueFromWarningHeader);
// pretend that another thread created the same response at a different time
if (randomBoolean()) {
final String duplicateValue = DeprecationLogger.formatWarning("qux");
threadContext.addResponseHeader("baz", duplicateValue, DeprecationLogger::extractWarningValueFromWarningHeader);
}
threadContext.addResponseHeader("Warning", "One is the loneliest number");
threadContext.addResponseHeader("Warning", "Two can be as bad as one");
if (expectThird) {
@ -186,11 +195,14 @@ public class ThreadContextTests extends ESTestCase {
final Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders();
final List<String> foo = responseHeaders.get("foo");
final List<String> baz = responseHeaders.get("baz");
final List<String> warnings = responseHeaders.get("Warning");
final int expectedWarnings = expectThird ? 3 : 2;
assertThat(foo, hasSize(1));
assertThat(baz, hasSize(1));
assertEquals("bar", foo.get(0));
assertEquals(value, baz.get(0));
assertThat(warnings, hasSize(expectedWarnings));
assertThat(warnings, hasItem(equalTo("One is the loneliest number")));
assertThat(warnings, hasItem(equalTo("Two can be as bad as one")));

View File

@ -42,6 +42,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets;
@ -209,7 +210,9 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
*/
public void testThrottleWhenAllocatingToMatchingNode() {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(new AllocationDeciders(Settings.EMPTY,
Arrays.asList(new TestAllocateDecision(Decision.YES), new SameShardAllocationDecider(Settings.EMPTY),
Arrays.asList(new TestAllocateDecision(Decision.YES),
new SameShardAllocationDecider(
Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
new AllocationDecider(Settings.EMPTY) {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {

View File

@ -0,0 +1,182 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.filter.RegexFilter;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import static org.elasticsearch.common.util.concurrent.EsExecutors.PROCESSORS_SETTING;
import static org.elasticsearch.index.IndexSettingsTests.newIndexMeta;
import static org.elasticsearch.index.MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING;
import static org.elasticsearch.index.MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING;
import static org.hamcrest.core.StringContains.containsString;
public class MergeSchedulerSettingsTests extends ESTestCase {
private static class MockAppender extends AbstractAppender {
public boolean sawUpdateMaxThreadCount;
public boolean sawUpdateAutoThrottle;
MockAppender(final String name) throws IllegalAccessException {
super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null);
}
@Override
public void append(LogEvent event) {
String message = event.getMessage().getFormattedMessage();
if (event.getLevel() == Level.TRACE && event.getLoggerName().endsWith("lucene.iw")) {
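// nothing to do here: Lucene IndexWriter TRACE output is not what this appender tracks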
}
if (event.getLevel() == Level.INFO
&& message.contains("updating [index.merge.scheduler.max_thread_count] from [10000] to [1]")) {
sawUpdateMaxThreadCount = true;
}
if (event.getLevel() == Level.INFO
&& message.contains("updating [index.merge.scheduler.auto_throttle] from [true] to [false]")) {
sawUpdateAutoThrottle = true;
}
}
@Override
public boolean ignoreExceptions() {
return false;
}
}
public void testUpdateAutoThrottleSettings() throws Exception {
MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings");
mockAppender.start();
final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings");
Loggers.addAppender(settingsLogger, mockAppender);
Loggers.setLevel(settingsLogger, Level.TRACE);
try {
Settings.Builder builder = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1")
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "2")
.put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "true");
IndexSettings settings = new IndexSettings(newIndexMeta("index", builder.build()), Settings.EMPTY);
assertTrue(settings.getMergeSchedulerConfig().isAutoThrottle());
builder.put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "false");
settings.updateIndexMetaData(newIndexMeta("index", builder.build()));
// Make sure we log the change:
assertTrue(mockAppender.sawUpdateAutoThrottle);
assertFalse(settings.getMergeSchedulerConfig().isAutoThrottle());
} finally {
Loggers.removeAppender(settingsLogger, mockAppender);
mockAppender.stop();
Loggers.setLevel(settingsLogger, (Level) null);
}
}
// #6882: make sure we can change index.merge.scheduler.max_thread_count live
public void testUpdateMergeMaxThreadCount() throws Exception {
MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings");
mockAppender.start();
final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings");
Loggers.addAppender(settingsLogger, mockAppender);
Loggers.setLevel(settingsLogger, Level.TRACE);
try {
Settings.Builder builder = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "10000")
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000");
IndexSettings settings = new IndexSettings(newIndexMeta("index", builder.build()), Settings.EMPTY);
assertEquals(10000, settings.getMergeSchedulerConfig().getMaxMergeCount());
assertEquals(10000, settings.getMergeSchedulerConfig().getMaxThreadCount());
settings.updateIndexMetaData(newIndexMeta("index", builder.build()));
assertFalse(mockAppender.sawUpdateMaxThreadCount);
builder.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1");
settings.updateIndexMetaData(newIndexMeta("index", builder.build()));
// Make sure we log the change:
assertTrue(mockAppender.sawUpdateMaxThreadCount);
} finally {
Loggers.removeAppender(settingsLogger, mockAppender);
mockAppender.stop();
Loggers.setLevel(settingsLogger, (Level) null);
}
}
private static IndexMetaData createMetaData(int maxThreadCount, int maxMergeCount, int numProc) {
Settings.Builder builder = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
if (maxThreadCount != -1) {
builder.put(MAX_THREAD_COUNT_SETTING.getKey(), maxThreadCount);
}
if (maxMergeCount != -1) {
builder.put(MAX_MERGE_COUNT_SETTING.getKey(), maxMergeCount);
}
if (numProc != -1) {
builder.put(PROCESSORS_SETTING.getKey(), numProc);
}
return newIndexMeta("index", builder.build());
}
public void testMaxThreadAndMergeCount() {
IllegalArgumentException exc =
expectThrows(IllegalArgumentException.class,
() -> new MergeSchedulerConfig(new IndexSettings(createMetaData(10, 4, -1), Settings.EMPTY)));
assertThat(exc.getMessage(), containsString("maxThreadCount (= 10) should be <= maxMergeCount (= 4)"));
IndexSettings settings = new IndexSettings(createMetaData(-1, -1, 2), Settings.EMPTY);
assertEquals(1, settings.getMergeSchedulerConfig().getMaxThreadCount());
assertEquals(6, settings.getMergeSchedulerConfig().getMaxMergeCount());
settings = new IndexSettings(createMetaData(4, 10, -1), Settings.EMPTY);
assertEquals(4, settings.getMergeSchedulerConfig().getMaxThreadCount());
assertEquals(10, settings.getMergeSchedulerConfig().getMaxMergeCount());
IndexMetaData newMetaData = createMetaData(15, 20, -1);
settings.updateIndexMetaData(newMetaData);
assertEquals(15, settings.getMergeSchedulerConfig().getMaxThreadCount());
assertEquals(20, settings.getMergeSchedulerConfig().getMaxMergeCount());
settings.updateIndexMetaData(createMetaData(40, 50, -1));
assertEquals(40, settings.getMergeSchedulerConfig().getMaxThreadCount());
assertEquals(50, settings.getMergeSchedulerConfig().getMaxMergeCount());
settings.updateIndexMetaData(createMetaData(40, -1, -1));
assertEquals(40, settings.getMergeSchedulerConfig().getMaxThreadCount());
assertEquals(45, settings.getMergeSchedulerConfig().getMaxMergeCount());
final IndexSettings finalSettings = settings;
exc = expectThrows(IllegalArgumentException.class,
() -> finalSettings.updateIndexMetaData(createMetaData(40, 30, -1)));
assertThat(exc.getMessage(), containsString("maxThreadCount (= 40) should be <= maxMergeCount (= 30)"));
exc = expectThrows(IllegalArgumentException.class,
() -> finalSettings.updateIndexMetaData(createMetaData(-1, 3, 8)));
assertThat(exc.getMessage(), containsString("maxThreadCount (= 4) should be <= maxMergeCount (= 3)"));
}
}
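The numbers in testMaxThreadAndMergeCount are consistent with simple derived defaults; a sketch of that arithmetic, inferred from the assertions above rather than taken from the implementation:

// Hypothetical sketch of the defaults the assertions imply:
// maxThreadCount defaults to max(1, min(4, processors / 2)),
// maxMergeCount defaults to maxThreadCount + 5.
class MergeSchedulerDefaultsSketch {
    static int defaultMaxThreadCount(int processors) {
        return Math.max(1, Math.min(4, processors / 2));
    }
    static int defaultMaxMergeCount(int maxThreadCount) {
        return maxThreadCount + 5;
    }
    public static void main(String[] args) {
        System.out.println(defaultMaxThreadCount(2) + " " + defaultMaxMergeCount(1));  // 1 6
        System.out.println(defaultMaxThreadCount(8) + " " + defaultMaxMergeCount(40)); // 4 45
    }
}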

View File

@ -24,11 +24,11 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.MapperQueryParser;
import org.apache.lucene.queryparser.classic.QueryParserSettings;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.GraphQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.MultiTermQuery;
@ -40,7 +40,9 @@ import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.TooComplexToDeterminizeException;
import org.elasticsearch.common.ParsingException;
@ -396,26 +398,24 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
// simple multi-term
Query query = queryParser.parse("guinea pig");
Query expectedQuery = new GraphQuery(
new BooleanQuery.Builder()
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST))
.build(),
new TermQuery(new Term(STRING_FIELD_NAME, "cavy"))
);
Query expectedQuery = new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST).build(), defaultOp)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), defaultOp)
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
// simple with additional tokens
query = queryParser.parse("that guinea pig smells");
expectedQuery = new BooleanQuery.Builder()
.add(new TermQuery(new Term(STRING_FIELD_NAME, "that")), defaultOp)
.add(new GraphQuery(
new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST))
.build(),
new TermQuery(new Term(STRING_FIELD_NAME, "cavy"))
), defaultOp)
.build(), Occur.SHOULD)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), Occur.SHOULD).build(), defaultOp)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), defaultOp)
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
@ -423,70 +423,62 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
// complex
query = queryParser.parse("+that -(guinea pig) +smells");
expectedQuery = new BooleanQuery.Builder()
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "that")), BooleanClause.Occur.MUST))
.add(new BooleanClause(new GraphQuery(
new BooleanQuery.Builder()
.add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST)
.build(),
new TermQuery(new Term(STRING_FIELD_NAME, "cavy"))
), BooleanClause.Occur.MUST_NOT))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), BooleanClause.Occur.MUST))
.build();
.add(new TermQuery(new Term(STRING_FIELD_NAME, "that")), Occur.MUST)
.add(new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST)
.build(), defaultOp)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), defaultOp)
.build(), Occur.MUST_NOT)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), Occur.MUST)
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
// no paren should cause guinea and pig to be treated as separate tokens
// no parens should cause guinea and pig to be treated as separate tokens
query = queryParser.parse("+that -guinea pig +smells");
expectedQuery = new BooleanQuery.Builder()
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "that")), BooleanClause.Occur.MUST))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), BooleanClause.Occur.MUST_NOT))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), defaultOp))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), BooleanClause.Occur.MUST))
.add(new TermQuery(new Term(STRING_FIELD_NAME, "that")), BooleanClause.Occur.MUST)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), BooleanClause.Occur.MUST_NOT)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), defaultOp)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), BooleanClause.Occur.MUST)
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
// phrase
// span query
query = queryParser.parse("\"that guinea pig smells\"");
expectedQuery = new BooleanQuery.Builder()
.add(new SpanNearQuery.Builder(STRING_FIELD_NAME, true)
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "that")))
.addClause(new SpanOrQuery(
new SpanNearQuery.Builder(STRING_FIELD_NAME, true)
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "guinea")))
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "pig"))).build(),
new SpanTermQuery(new Term(STRING_FIELD_NAME, "cavy"))))
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "smells")))
.build(), Occur.SHOULD)
.setDisableCoord(true)
.add(new BooleanClause(new GraphQuery(
new PhraseQuery.Builder()
.add(new Term(STRING_FIELD_NAME, "that"))
.add(new Term(STRING_FIELD_NAME, "guinea"))
.add(new Term(STRING_FIELD_NAME, "pig"))
.add(new Term(STRING_FIELD_NAME, "smells"))
.build(),
new PhraseQuery.Builder()
.add(new Term(STRING_FIELD_NAME, "that"))
.add(new Term(STRING_FIELD_NAME, "cavy"))
.add(new Term(STRING_FIELD_NAME, "smells"))
.build()
), BooleanClause.Occur.SHOULD)).build();
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
// phrase with slop
// span query with slop
query = queryParser.parse("\"that guinea pig smells\"~2");
expectedQuery = new BooleanQuery.Builder()
.add(new SpanNearQuery.Builder(STRING_FIELD_NAME, true)
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "that")))
.addClause(new SpanOrQuery(
new SpanNearQuery.Builder(STRING_FIELD_NAME, true)
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "guinea")))
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "pig"))).build(),
new SpanTermQuery(new Term(STRING_FIELD_NAME, "cavy"))))
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "smells")))
.setSlop(2)
.build(),
Occur.SHOULD)
.setDisableCoord(true)
.add(new BooleanClause(new GraphQuery(
new PhraseQuery.Builder()
.add(new Term(STRING_FIELD_NAME, "that"))
.add(new Term(STRING_FIELD_NAME, "guinea"))
.add(new Term(STRING_FIELD_NAME, "pig"))
.add(new Term(STRING_FIELD_NAME, "smells"))
.setSlop(2)
.build(),
new PhraseQuery.Builder()
.add(new Term(STRING_FIELD_NAME, "that"))
.add(new Term(STRING_FIELD_NAME, "cavy"))
.add(new Term(STRING_FIELD_NAME, "smells"))
.setSlop(2)
.build()
), BooleanClause.Occur.SHOULD)).build();
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
}
}
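For reference, the reshaped expectation above encodes the multi-term synonym ("guinea pig" vs. "cavy") with span queries rather than the removed GraphQuery. A minimal standalone sketch of the same shape, using only the Lucene span API the test already imports:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;

class SpanSynonymSketch {
    // "that (guinea pig | cavy) smells" as ordered spans, mirroring the expected query above
    static SpanQuery phraseWithSynonym(String field) {
        SpanQuery guineaPig = new SpanNearQuery.Builder(field, true)
                .addClause(new SpanTermQuery(new Term(field, "guinea")))
                .addClause(new SpanTermQuery(new Term(field, "pig")))
                .build();
        // the two-token phrase and its single-token synonym are alternatives at the same position
        SpanQuery withSynonym = new SpanOrQuery(guineaPig, new SpanTermQuery(new Term(field, "cavy")));
        return new SpanNearQuery.Builder(field, true)
                .addClause(new SpanTermQuery(new Term(field, "that")))
                .addClause(withSynonym)
                .addClause(new SpanTermQuery(new Term(field, "smells")))
                .build();
    }
}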


@ -25,12 +25,14 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.GraphQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
@ -138,27 +140,27 @@ public class SimpleQueryParserTests extends ESTestCase {
// phrase will pick it up
query = parser.parse("\"guinea pig\"");
expectedQuery = new GraphQuery(
new PhraseQuery("field1", "guinea", "pig"),
new TermQuery(new Term("field1", "cavy")));
SpanTermQuery span1 = new SpanTermQuery(new Term("field1", "guinea"));
SpanTermQuery span2 = new SpanTermQuery(new Term("field1", "pig"));
expectedQuery = new SpanOrQuery(
new SpanNearQuery(new SpanQuery[] { span1, span2 }, 0, true),
new SpanTermQuery(new Term("field1", "cavy")));
assertThat(query, equalTo(expectedQuery));
// phrase with slop
query = parser.parse("big \"guinea pig\"~2");
query = parser.parse("big \"tiny guinea pig\"~2");
expectedQuery = new BooleanQuery.Builder()
.add(new BooleanClause(new TermQuery(new Term("field1", "big")), defaultOp))
.add(new BooleanClause(new GraphQuery(
new PhraseQuery.Builder()
.add(new Term("field1", "guinea"))
.add(new Term("field1", "pig"))
.setSlop(2)
.build(),
new TermQuery(new Term("field1", "cavy"))), defaultOp))
.add(new TermQuery(new Term("field1", "big")), defaultOp)
.add(new SpanNearQuery(new SpanQuery[] {
new SpanTermQuery(new Term("field1", "tiny")),
new SpanOrQuery(
new SpanNearQuery(new SpanQuery[] { span1, span2 }, 0, true),
new SpanTermQuery(new Term("field1", "cavy"))
)
}, 2, true), defaultOp)
.build();
assertThat(query, equalTo(expectedQuery));
}
}
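The rewrite above also drops the explicit BooleanClause wrapping. That change is purely cosmetic: BooleanQuery.Builder's two-argument add creates the clause internally, so both forms produce the same query. A small sketch of the equivalence:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

class ClauseShorthandSketch {
    static BooleanQuery sample() {
        BooleanQuery.Builder b = new BooleanQuery.Builder();
        // the two-argument add wraps the query in a BooleanClause internally,
        // so these two lines add identical clauses
        b.add(new TermQuery(new Term("field1", "big")), BooleanClause.Occur.MUST);
        b.add(new BooleanClause(new TermQuery(new Term("field1", "big")), BooleanClause.Occur.MUST));
        return b.build();
    }
}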


@ -31,6 +31,7 @@ import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.bulk.BulkShardResponse;
import org.elasticsearch.action.bulk.TransportShardBulkActionTests;
import org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
@ -549,7 +550,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
*/
protected IndexResponse indexOnPrimary(IndexRequest request, IndexShard primary) throws Exception {
final Engine.IndexResult indexResult = executeIndexRequestOnPrimary(request, primary,
null);
new TransportShardBulkActionTests.NoopMappingUpdatePerformer());
request.primaryTerm(primary.getPrimaryTerm());
TransportWriteActionTestHelper.performPostWriteActions(primary, request, indexResult.getTranslogLocation(), logger);
return new IndexResponse(
@ -591,5 +592,4 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
replica.getTranslog().sync();
}
}
}


@ -79,13 +79,11 @@ public class ESToParentBlockJoinQueryTests extends ESTestCase {
new PhraseQuery("body", "term"), // rewrites to a TermQuery
new QueryBitSetProducer(new TermQuery(new Term("is", "parent"))),
ScoreMode.Avg, "nested");
assertEquals(q, q.rewrite(new MultiReader()));
// do this once LUCENE-7685 is addressed
// Query expected = new ESToParentBlockJoinQuery(
// new TermQuery(new Term("body", "term")),
// new QueryBitSetProducer(new TermQuery(new Term("is", "parent"))),
// ScoreMode.Avg, "nested");
// Query rewritten = q.rewrite(new MultiReader());
// assertEquals(expected, rewritten);
Query expected = new ESToParentBlockJoinQuery(
new TermQuery(new Term("body", "term")),
new QueryBitSetProducer(new TermQuery(new Term("is", "parent"))),
ScoreMode.Avg, "nested");
Query rewritten = q.rewrite(new MultiReader());
assertEquals(expected, rewritten);
}
}


@ -18,9 +18,10 @@
*/
package org.elasticsearch.index.shard;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
@ -35,7 +36,8 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.function.Supplier;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
@ -146,7 +148,57 @@ public class IndexShardOperationsLockTests extends ESTestCase {
block.acquire(future, ThreadPool.Names.GENERIC, true);
assertFalse(future.isDone());
}
future.get(1, TimeUnit.MINUTES).close();
future.get(1, TimeUnit.HOURS).close();
}
/**
* Tests that the ThreadContext is restored when an operation is executed after it has been delayed due to a block
*/
public void testThreadContextPreservedIfBlock() throws ExecutionException, InterruptedException, TimeoutException {
final ThreadContext context = threadPool.getThreadContext();
final Function<ActionListener<Releasable>, Boolean> contextChecker = (listener) -> {
if ("bar".equals(context.getHeader("foo")) == false) {
listener.onFailure(new IllegalStateException("context did not have value [bar] for header [foo]. Actual value [" +
context.getHeader("foo") + "]"));
} else if ("baz".equals(context.getTransient("bar")) == false) {
listener.onFailure(new IllegalStateException("context did not have value [baz] for transient [bar]. Actual value [" +
context.getTransient("bar") + "]"));
} else {
return true;
}
return false;
};
PlainActionFuture<Releasable> future = new PlainActionFuture<Releasable>() {
@Override
public void onResponse(Releasable releasable) {
if (contextChecker.apply(this)) {
super.onResponse(releasable);
}
}
};
PlainActionFuture<Releasable> future2 = new PlainActionFuture<Releasable>() {
@Override
public void onResponse(Releasable releasable) {
if (contextChecker.apply(this)) {
super.onResponse(releasable);
}
}
};
try (Releasable releasable = blockAndWait()) {
// we preserve the thread context here so that we have a different context in the call to acquire than the context present
// when the releasable is closed
try (ThreadContext.StoredContext ignore = context.newStoredContext(false)) {
context.putHeader("foo", "bar");
context.putTransient("bar", "baz");
// test both with and without an executor name
block.acquire(future, ThreadPool.Names.GENERIC, true);
block.acquire(future2, null, true);
}
assertFalse(future.isDone());
}
future.get(1, TimeUnit.HOURS).close();
future2.get(1, TimeUnit.HOURS).close();
}
protected Releasable blockAndWait() throws InterruptedException {
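The try-with-resources block in the new test above is its heart: closing a StoredContext rolls the ThreadContext back to what it was when newStoredContext was called, while listeners queued inside the block keep the mutated context. A minimal sketch of that rollback, assuming only the ThreadContext calls the test itself uses:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

class StoredContextSketch {
    static void demo() {
        ThreadContext context = new ThreadContext(Settings.EMPTY);
        try (ThreadContext.StoredContext ignore = context.newStoredContext(false)) {
            context.putHeader("foo", "bar");      // visible only while the stored context is open
            context.putTransient("bar", "baz");
        }
        // close() restored the context captured at newStoredContext time
        assert context.getHeader("foo") == null;
    }
}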


@ -117,15 +117,15 @@ public class ClusterStateChanges extends AbstractComponent {
public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool threadPool) {
super(Settings.builder().put(PATH_HOME_SETTING.getKey(), "dummy").build());
ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
allocationService = new AllocationService(settings, new AllocationDeciders(settings,
new HashSet<>(Arrays.asList(new SameShardAllocationDecider(settings),
new HashSet<>(Arrays.asList(new SameShardAllocationDecider(settings, clusterSettings),
new ReplicaAfterPrimaryActiveAllocationDecider(settings),
new RandomAllocationDeciderTests.RandomAllocationDecider(getRandom())))),
new TestGatewayAllocator(), new BalancedShardsAllocator(settings),
EmptyClusterInfoService.INSTANCE);
shardFailedClusterStateTaskExecutor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger);
shardStartedClusterStateTaskExecutor = new ShardStateAction.ShardStartedClusterStateTaskExecutor(allocationService, logger);
ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
ActionFilters actionFilters = new ActionFilters(Collections.emptySet());
IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterSettings);


@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.equalTo;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
public class IndexPrimaryRelocationIT extends ESIntegTestCase {
private static final int RELOCATION_COUNT = 25;
private static final int RELOCATION_COUNT = 15;
@TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.index.shard:TRACE,org.elasticsearch.cluster.service:TRACE")
public void testPrimaryRelocationWhileIndexing() throws Exception {
@ -88,7 +88,10 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase {
if (indexingThread.isAlive() == false) { // indexing process aborted early, no need for more relocations as test has already failed
break;
}
if (i > 0 && i % 5 == 0) {
logger.info("--> [iteration {}] flushing index", i);
client().admin().indices().prepareFlush("test").get();
}
}
finished.set(true);
indexingThread.join();


@ -19,26 +19,14 @@
package org.elasticsearch.indices.settings;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.filter.RegexFilter;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.plugins.Plugin;
@ -237,189 +225,6 @@ public class UpdateSettingsIT extends ESIntegTestCase {
}
private static class MockAppender extends AbstractAppender {
public boolean sawUpdateMaxThreadCount;
public boolean sawUpdateAutoThrottle;
MockAppender(final String name) throws IllegalAccessException {
super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null);
}
@Override
public void append(LogEvent event) {
String message = event.getMessage().getFormattedMessage();
if (event.getLevel() == Level.TRACE && event.getLoggerName().endsWith("lucene.iw")) {
}
if (event.getLevel() == Level.INFO
&& message.contains("updating [index.merge.scheduler.max_thread_count] from [10000] to [1]")) {
sawUpdateMaxThreadCount = true;
}
if (event.getLevel() == Level.INFO
&& message.contains("updating [index.merge.scheduler.auto_throttle] from [true] to [false]")) {
sawUpdateAutoThrottle = true;
}
}
@Override
public boolean ignoreExceptions() {
return false;
}
}
public void testUpdateAutoThrottleSettings() throws Exception {
MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings");
mockAppender.start();
Logger rootLogger = LogManager.getRootLogger();
Loggers.addAppender(rootLogger, mockAppender);
Level savedLevel = rootLogger.getLevel();
Loggers.setLevel(rootLogger, Level.TRACE);
try {
// No throttling at first, only 1 non-replicated shard, force lots of merging:
assertAcked(prepareCreate("test")
.setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1")
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "2")
.put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "true")));
// Disable auto throttle:
client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "false"))
.get();
// if a node has processed the cluster state update but not yet returned from the update task, it might still log messages;
// these log messages will race with the stopping of the appender so we wait to ensure these tasks are done processing
assertBusy(() -> {
for (final ClusterService service : internalCluster().getInstances(ClusterService.class)) {
assertThat(service.numberOfPendingTasks(), equalTo(0));
}
});
// Make sure we log the change:
assertTrue(mockAppender.sawUpdateAutoThrottle);
// Make sure setting says it is in fact changed:
GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
assertThat(getSettingsResponse.getSetting("test", MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey()), equalTo("false"));
} finally {
Loggers.setLevel(rootLogger, savedLevel);
Loggers.removeAppender(rootLogger, mockAppender);
// don't call stop here; some node might still use this reference at this point, causing tests to fail.
// this is only relevant in integ tests; unit tests can control what uses a logger and what doesn't
// mockAppender.stop();
}
}
public void testInvalidMergeMaxThreadCount() throws IllegalAccessException {
CreateIndexRequestBuilder createBuilder = prepareCreate("test")
.setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "100")
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10")
);
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
() -> createBuilder.get());
assertThat(exc.getMessage(), equalTo("maxThreadCount (= 100) should be <= maxMergeCount (= 10)"));
assertAcked(prepareCreate("test")
.setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "100")
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "100")
));
{
UpdateSettingsRequestBuilder updateBuilder =
client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1000"));
exc = expectThrows(IllegalArgumentException.class,
() -> updateBuilder.get());
assertThat(exc.getMessage(), equalTo("maxThreadCount (= 1000) should be <= maxMergeCount (= 100)"));
}
{
UpdateSettingsRequestBuilder updateBuilder =
client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10"));
exc = expectThrows(IllegalArgumentException.class,
() -> updateBuilder.get());
assertThat(exc.getMessage(), equalTo("maxThreadCount (= 100) should be <= maxMergeCount (= 10)"));
}
}
// #6882: make sure we can change index.merge.scheduler.max_thread_count live
public void testUpdateMergeMaxThreadCount() throws Exception {
MockAppender mockAppender = new MockAppender("testUpdateMergeMaxThreadCount");
mockAppender.start();
Logger rootLogger = LogManager.getRootLogger();
Level savedLevel = rootLogger.getLevel();
Loggers.addAppender(rootLogger, mockAppender);
Loggers.setLevel(rootLogger, Level.TRACE);
try {
assertAcked(prepareCreate("test")
.setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
.put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "10000")
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000")));
assertFalse(mockAppender.sawUpdateMaxThreadCount);
// Now make a live change to reduce allowed merge threads:
client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1"))
.get();
// if a node has processed the cluster state update but not yet returned from the update task, it might still log messages;
// these log messages will race with the stopping of the appender so we wait to ensure these tasks are done processing
assertBusy(() -> {
for (final ClusterService service : internalCluster().getInstances(ClusterService.class)) {
assertThat(service.numberOfPendingTasks(), equalTo(0));
}
});
// Make sure we log the change:
assertTrue(mockAppender.sawUpdateMaxThreadCount);
// Make sure setting says it is in fact changed:
GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
assertThat(getSettingsResponse.getSetting("test", MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey()), equalTo("1"));
} finally {
Loggers.setLevel(rootLogger, savedLevel);
Loggers.removeAppender(rootLogger, mockAppender);
// don't call stop here; some node might still use this reference at this point, causing tests to fail.
// this is only relevant in integ tests; unit tests can control what uses a logger and what doesn't
// mockAppender.stop();
}
}
public void testUpdateSettingsWithBlocks() {
createIndex("test");
ensureGreen("test");


@ -125,13 +125,8 @@ public abstract class AggregatorTestCase extends ESTestCase {
SearchLookup searchLookup = new SearchLookup(mapperService, ifds, new String[]{"type"});
when(searchContext.lookup()).thenReturn(searchLookup);
QueryShardContext queryShardContext = mock(QueryShardContext.class);
for (MappedFieldType fieldType : fieldTypes) {
when(queryShardContext.fieldMapper(fieldType.name())).thenReturn(fieldType);
when(queryShardContext.getForField(fieldType)).then(invocation -> fieldType.fielddataBuilder().build(
indexSettings, fieldType, new IndexFieldDataCache.None(), circuitBreakerService, mock(MapperService.class)));
when(searchContext.getQueryShardContext()).thenReturn(queryShardContext);
}
QueryShardContext queryShardContext = queryShardContextMock(fieldTypes, indexSettings, circuitBreakerService);
when(searchContext.getQueryShardContext()).thenReturn(queryShardContext);
@SuppressWarnings("unchecked")
A aggregator = (A) aggregationBuilder.build(searchContext, null).create(null, true);
@ -145,6 +140,20 @@ public abstract class AggregatorTestCase extends ESTestCase {
return mock(MapperService.class);
}
/**
* sub-tests that need a more complex mock can override this
*/
protected QueryShardContext queryShardContextMock(MappedFieldType[] fieldTypes, IndexSettings indexSettings,
CircuitBreakerService circuitBreakerService) {
QueryShardContext queryShardContext = mock(QueryShardContext.class);
for (MappedFieldType fieldType : fieldTypes) {
when(queryShardContext.fieldMapper(fieldType.name())).thenReturn(fieldType);
when(queryShardContext.getForField(fieldType)).then(invocation -> fieldType.fielddataBuilder().build(indexSettings, fieldType,
new IndexFieldDataCache.None(), circuitBreakerService, mock(MapperService.class)));
}
return queryShardContext;
}
protected <A extends InternalAggregation, C extends Aggregator> A search(IndexSearcher searcher,
Query query,
AggregationBuilder builder,
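The new queryShardContextMock hook lets sub-tests swap in a richer mock than the default stubbing it factors out. A sketch of a hypothetical override (the subclass name and the extra "alias_field" stub are illustrative and not part of this commit):

import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.aggregations.AggregatorTestCase;

import static org.mockito.Mockito.when;

public class MyAggregatorTests extends AggregatorTestCase {  // hypothetical subclass
    @Override
    protected QueryShardContext queryShardContextMock(MappedFieldType[] fieldTypes, IndexSettings indexSettings,
                                                      CircuitBreakerService circuitBreakerService) {
        QueryShardContext context = super.queryShardContextMock(fieldTypes, indexSettings, circuitBreakerService);
        if (fieldTypes.length > 0) {
            // illustrative extra stub: resolve a hypothetical "alias_field" to the first real field type
            when(context.fieldMapper("alias_field")).thenReturn(fieldTypes[0]);
        }
        return context;
    }
}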

View File

@ -0,0 +1,67 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.WrapperQueryBuilder;
import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregator;
import org.elasticsearch.search.aggregations.bucket.filters.InternalFilters;
import org.elasticsearch.test.ESSingleNodeTestCase;
import java.io.IOException;
public class FiltersAggsRewriteIT extends ESSingleNodeTestCase {
public void testWrapperQueryIsRewritten() throws IOException {
createIndex("test", Settings.EMPTY, "test", "title", "type=text");
client().prepareIndex("test", "test", "1").setSource("title", "foo bar baz").get();
client().prepareIndex("test", "test", "2").setSource("title", "foo foo foo").get();
client().prepareIndex("test", "test", "3").setSource("title", "bar baz bax").get();
client().admin().indices().prepareRefresh("test").get();
XContentType xContentType = randomFrom(XContentType.values());
BytesReference bytesReference;
try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType)) {
builder.startObject();
{
builder.startObject("terms");
{
builder.array("title", "foo");
}
builder.endObject();
}
builder.endObject();
bytesReference = builder.bytes();
}
FiltersAggregationBuilder builder = new FiltersAggregationBuilder("titles", new FiltersAggregator.KeyedFilter("titleterms",
new WrapperQueryBuilder(bytesReference)));
SearchResponse searchResponse = client().prepareSearch("test").setSize(0).addAggregation(builder).get();
assertEquals(3, searchResponse.getHits().getTotalHits());
InternalFilters filters = searchResponse.getAggregations().get("titles");
assertEquals(1, filters.getBuckets().size());
assertEquals(2, filters.getBuckets().get(0).getDocCount());
}
}
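WrapperQueryBuilder defers parsing of the wrapped bytes until the query is rewritten, which is exactly what this test exercises. As an equivalent sketch (not part of the commit), the same filter can be built from a raw JSON string instead of an XContentBuilder:

import org.elasticsearch.index.query.WrapperQueryBuilder;
import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregator;

class WrapperSketch {
    static FiltersAggregationBuilder sameAggFromRawJson() {
        // the JSON string is parsed only when the query is rewritten on the shard
        return new FiltersAggregationBuilder("titles",
                new FiltersAggregator.KeyedFilter("titleterms",
                        new WrapperQueryBuilder("{\"terms\":{\"title\":[\"foo\"]}}")));
    }
}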


@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
@ -36,9 +37,7 @@ public abstract class InternalAggregationTestCase<T extends InternalAggregation>
private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(
new SearchModule(Settings.EMPTY, false, emptyList()).getNamedWriteables());
protected abstract T createTestInstance(String name,
List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData);
protected abstract T createTestInstance(String name, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData);
/** Return an instance on an unmapped field. */
protected T createUnmappedInstance(String name,
@ -57,21 +56,30 @@ public abstract class InternalAggregationTestCase<T extends InternalAggregation>
inputs.add(t);
toReduce.add(t);
}
if (randomBoolean()) {
// we leave at least one in the list
List<InternalAggregation> internalAggregations = randomSubsetOf(randomIntBetween(1, toReduceSize), toReduce);
InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(null, null, false);
ScriptService mockScriptService = mockScriptService();
if (randomBoolean() && toReduce.size() > 1) {
// we leave at least the first element in the list
List<InternalAggregation> internalAggregations = randomSubsetOf(randomIntBetween(1, toReduceSize - 1),
toReduce.subList(1, toReduceSize));
InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(null, mockScriptService, false);
@SuppressWarnings("unchecked")
T reduced = (T) inputs.get(0).reduce(internalAggregations, context);
toReduce.removeAll(internalAggregations);
toReduce.add(reduced);
}
InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(null, null, true);
InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(null, mockScriptService, true);
@SuppressWarnings("unchecked")
T reduced = (T) inputs.get(0).reduce(toReduce, context);
assertReduced(reduced, inputs);
}
/**
* override in tests that need it
*/
protected ScriptService mockScriptService() {
return null;
}
protected abstract void assertReduced(T reduced, List<T> inputs);
@Override
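mockScriptService returns null by default, so most tests reduce without scripts; sub-tests whose reduce phase runs scripts can override it. A hypothetical sketch (the Mockito mock is illustrative; any non-null ScriptService would do):

import org.elasticsearch.script.ScriptService;

import static org.mockito.Mockito.mock;

class ScriptedReduceSketch {
    // drop this into a subclass of InternalAggregationTestCase that needs scripts during reduce
    protected ScriptService mockScriptService() {
        return mock(ScriptService.class);
    }
}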

Some files were not shown because too many files have changed in this diff.