[TEST] Add basic Backwards Compatibility Tests

This commit adds basic infrastructure as well as primitive tests
to ensure backwards compatibility between the current development
trunk and an arbitrary previous version. The compatibility tests
are simple unit tests derived from a base class that starts and
manages nodes from a provided elasticsearch release package.

The following command line executes all backwards compatibility tests
in isolation:

```
mvn test -Dtests.bwc=true -Dtests.bwc.version=1.2.1 -Dtests.class=org.elasticsearch.bwcompat.*
```

These tests run basic checks such as rolling upgrades and
routing/searching/get against the specified version. The version
must be present in the `./backwards` folder as
`./backwards/elasticsearch-x.y.z`.
Simon Willnauer 2014-05-14 17:52:02 +02:00
parent 93b56eb004
commit 4dfa822e1b
17 changed files with 1518 additions and 447 deletions

.gitignore

@ -12,6 +12,7 @@ target/
docs/html/
docs/build.log
/tmp/
backwards/
## eclipse ignores (use 'mvn eclipse:eclipse' to build eclipse projects)
## The only configuration files which are not ignored are certain files in


@ -176,6 +176,24 @@ even if tests are passing.
mvn test -Dtests.output=always
------------------------------
== Backwards Compatibility Tests
Backwards compatibility tests are disabled by default since they require
a release version of elasticsearch to be present on the test system.
To run backwards compatibility tests, untar or unzip a release and run the tests
with the following command:
---------------------------------------------------------------------------
mvn test -Dtests.bwc=true -Dtests.bwc.version=x.y.z -Dtests.bwc.path=/path/to/elasticsearch
---------------------------------------------------------------------------
If the elasticsearch release is placed under `./backwards/elasticsearch-x.y.z`, the path
can be omitted:
---------------------------------------------------------------------------
mvn test -Dtests.bwc=true -Dtests.bwc.version=x.y.z
---------------------------------------------------------------------------
== Testing the REST layer
The available integration tests make use of the java API to communicate with
@ -213,3 +231,5 @@ cluster by specifying the `tests.cluster` property, which if present needs to co
comma separated list of nodes to connect to (e.g. localhost:9300). A transport client will
be created based on that and used for all the before|after test operations, and to extract
the http addresses of the nodes so that REST requests can be sent to them.


@ -36,6 +36,7 @@
<tests.shuffle>true</tests.shuffle>
<tests.output>onerror</tests.output>
<tests.client.ratio></tests.client.ratio>
<tests.bwc.path>${project.basedir}/backwards</tests.bwc.path>
<es.logger.level>INFO</es.logger.level>
<tests.heap.size>512m</tests.heap.size>
<tests.topn>5</tests.topn>
@ -468,6 +469,9 @@
<systemProperties>
<java.io.tmpdir>.</java.io.tmpdir> <!-- we use '.' since this is different per JVM-->
<!-- RandomizedTesting library system properties -->
<tests.bwc>${tests.bwc}</tests.bwc>
<tests.bwc.path>${tests.bwc.path}</tests.bwc.path>
<tests.bwc.version>${tests.bwc.version}</tests.bwc.version>
<tests.jvm.argline>${tests.jvm.argline}</tests.jvm.argline>
<tests.processors>${tests.processors}</tests.processors>
<tests.appendseed>${tests.appendseed}</tests.appendseed>


@ -36,6 +36,8 @@ public class DiscoveryModule extends AbstractModule implements SpawnModules {
private final Settings settings;
public static final String DISCOVERY_TYPE_KEY = "discovery.type";
public DiscoveryModule(Settings settings) {
this.settings = settings;
}
@ -48,7 +50,7 @@ public class DiscoveryModule extends AbstractModule implements SpawnModules {
} else {
defaultDiscoveryModule = ZenDiscoveryModule.class;
}
return ImmutableList.of(Modules.createModule(settings.getAsClass("discovery.type", defaultDiscoveryModule, "org.elasticsearch.discovery.", "DiscoveryModule"), settings));
return ImmutableList.of(Modules.createModule(settings.getAsClass(DISCOVERY_TYPE_KEY, defaultDiscoveryModule, "org.elasticsearch.discovery.", "DiscoveryModule"), settings));
}
@Override


@ -67,6 +67,32 @@ import java.util.logging.Logger;
// NOTE: this class is in o.a.lucene.util since it uses some classes that are related
// to the test framework that didn't make sense to copy but are package private access
public abstract class AbstractRandomizedTest extends RandomizedTest {
/**
* Annotation for backwards compat tests
*/
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@TestGroup(enabled = false, sysProperty = TESTS_BACKWARDS_COMPATIBILITY)
public @interface BackwardsCompatibilityTest {
}
/**
* Keys used to enable backwards compatibility tests and to select the release version to run them against,
* via the commandline -D{@value #TESTS_BACKWARDS_COMPATIBILITY} and -D{@value #TESTS_BACKWARDS_COMPATIBILITY_VERSION}
*/
public static final String TESTS_BACKWARDS_COMPATIBILITY = "tests.bwc";
public static final String TESTS_BACKWARDS_COMPATIBILITY_VERSION = "tests.bwc.version";
/**
* Key used to set the path to the elasticsearch releases used to run backwards compatibility tests,
* via the commandline -D{@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}
*/
public static final String TESTS_BACKWARDS_COMPATIBILITY_PATH = "tests.bwc.path";
/**
* Annotation for integration tests
*/


@ -0,0 +1,105 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bwcompat;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.ExecutionException;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
/**
 * Ensures that the pre-built analyzers produce the same tokens on a mixed version / upgraded cluster
 * as they did before the upgrade.
 */
@ElasticsearchIntegrationTest.ClusterScope(numDataNodes = 0, scope = ElasticsearchIntegrationTest.Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0)
public class BasicAnalysisBackwardCompatibilityTests extends ElasticsearchBackwardsCompatIntegrationTest {
/**
* Simple upgrade test for analyzers to make sure they analyze to the same tokens after upgrade
* TODO we need this for random tokenizers / tokenfilters as well
*/
@Test
public void testAnalyzerTokensAfterUpgrade() throws IOException, ExecutionException, InterruptedException {
int numFields = randomIntBetween(PreBuiltAnalyzers.values().length, PreBuiltAnalyzers.values().length * 10);
StringBuilder builder = new StringBuilder();
String[] fields = new String[numFields * 2];
int fieldId = 0;
for (int i = 0; i < fields.length; i++) {
fields[i++] = "field_" + fieldId++;
String analyzer = RandomPicks.randomFrom(getRandom(), PreBuiltAnalyzers.values()).name().toLowerCase(Locale.ROOT);
fields[i] = "type=string,analyzer=" + analyzer;
}
assertAcked(prepareCreate("test")
.addMapping("type", fields)
.setSettings(indexSettings()));
ensureYellow();
InputOutput[] inout = new InputOutput[numFields];
for (int i = 0; i < numFields; i++) {
String input = randomRealisticUnicodeOfCodepointLengthBetween(1, 100);
AnalyzeResponse test = client().admin().indices().prepareAnalyze("test", input).setField("field_" + i).get();
inout[i] = new InputOutput(test, input, "field_" + i);
}
logClusterState();
boolean upgraded;
do {
logClusterState();
upgraded = backwardsCluster().upgradeOneNode();
ensureYellow();
} while (upgraded);
for (int i = 0; i < inout.length; i++) {
InputOutput inputOutput = inout[i];
AnalyzeResponse test = client().admin().indices().prepareAnalyze("test", inputOutput.input).setField(inputOutput.field).get();
List<AnalyzeResponse.AnalyzeToken> tokens = test.getTokens();
List<AnalyzeResponse.AnalyzeToken> expectedTokens = inputOutput.response.getTokens();
assertThat("size mismatch field: " + fields[i*2] + " analyzer: " + fields[i*2 + 1] + " input: " + inputOutput.input, expectedTokens.size(), equalTo(tokens.size()));
for (int j = 0; j < tokens.size(); j++) {
String msg = "failed for term: " + expectedTokens.get(j).getTerm() + " field: " + fields[i*2] + " analyzer: " + fields[i*2 + 1] + " input: " + inputOutput.input;
assertThat(msg, expectedTokens.get(j).getTerm(), equalTo(tokens.get(j).getTerm()));
assertThat(msg, expectedTokens.get(j).getPosition(), equalTo(tokens.get(j).getPosition()));
assertThat(msg, expectedTokens.get(j).getStartOffset(), equalTo(tokens.get(j).getStartOffset()));
assertThat(msg, expectedTokens.get(j).getEndOffset(), equalTo(tokens.get(j).getEndOffset()));
assertThat(msg, expectedTokens.get(j).getType(), equalTo(tokens.get(j).getType()));
}
}
}
private static final class InputOutput {
final AnalyzeResponse response;
final String input;
final String field;
public InputOutput(AnalyzeResponse response, String input, String field) {
this.response = response;
this.input = input;
this.field = field;
}
}
}


@ -0,0 +1,266 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bwcompat;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.lucene.util.English;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.count.CountResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
/**
 * Basic backwards compatibility tests covering indexing, get, search, recovery and rolling upgrades
 * against a mixed version cluster.
 */
public class BasicBackwardsCompatibilityTest extends ElasticsearchBackwardsCompatIntegrationTest {
/**
* Basic test using Index & Realtime Get with external versioning. This test ensures routing works correctly across versions.
*/
@Test
public void testExternalVersion() throws Exception {
createIndex("test");
final boolean routing = randomBoolean();
int numDocs = randomIntBetween(10, 20);
for (int i = 0; i < numDocs; i++) {
String id = Integer.toString(i);
String routingKey = routing ? randomRealisticUnicodeOfLength(10) : null;
client().prepareIndex("test", "type1", id).setRouting(routingKey).setVersion(1).setVersionType(VersionType.EXTERNAL).setSource("field1", English.intToEnglish(i)).get();
GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).get();
assertThat("Document with ID " +id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(1l));
client().prepareIndex("test", "type1", id).setRouting(routingKey).setVersion(2).setVersionType(VersionType.EXTERNAL).setSource("field1", English.intToEnglish(i)).get();
get = client().prepareGet("test", "type1", id).setRouting(routingKey).get();
assertThat("Document with ID " +id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(2l));
}
}
/**
* Basic test using Index & Realtime Get with internal versioning. This test ensures routing works correctly across versions.
*/
@Test
public void testInternalVersion() throws Exception {
createIndex("test");
final boolean routing = randomBoolean();
int numDocs = randomIntBetween(10, 20);
for (int i = 0; i < numDocs; i++) {
String routingKey = routing ? randomRealisticUnicodeOfLength(10) : null;
String id = Integer.toString(i);
assertThat(id, client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).get().isCreated(), is(true));
GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).get();
assertThat("Document with ID " +id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(1l));
client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).execute().actionGet();
get = client().prepareGet("test", "type1", id).setRouting(routingKey).get();
assertThat("Document with ID " +id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(2l));
}
}
/**
* Very basic bw compat test against a mixed version cluster: random indexing and lookup by ID via term query
*/
@Test
public void testIndexAndSearch() throws Exception {
createIndex("test");
int numDocs = randomIntBetween(10, 20);
List<IndexRequestBuilder> builder = new ArrayList<>();
for (int i = 0; i < numDocs; i++) {
String id = Integer.toString(i);
builder.add(client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i), "the_id", id));
}
indexRandom(true, builder);
for (int i = 0; i < numDocs; i++) {
String id = Integer.toString(i);
assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
}
}
@Test
public void testRecoverFromPreviousVersion() throws ExecutionException, InterruptedException {
assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
ensureYellow();
assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", randomRealisticUnicodeOfLength(10) + String.valueOf(i)).setSource("field1", English.intToEnglish(i));
}
indexRandom(true, docs);
CountResponse countResponse = client().prepareCount().get();
assertHitCount(countResponse, numDocs);
backwardsCluster().allowOnlyNewNodes("test");
ensureYellow("test");// move all shards to the new node
final int numIters = randomIntBetween(10, 20);
for (int i = 0; i < numIters; i++) {
countResponse = client().prepareCount().get();
assertHitCount(countResponse, numDocs);
}
}
/**
* Test that ensures that we will never recover from a newer to an older version (we are not forward compatible)
*/
@Test
public void testNoRecoveryFromNewNodes() throws ExecutionException, InterruptedException {
assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().backwardsNodePattern()).put(indexSettings())));
if (backwardsCluster().numNewDataNodes() == 0) {
backwardsCluster().startNewNode();
}
ensureYellow();
assertAllShardsOnNodes("test", backwardsCluster().newNodePattern());
if (randomBoolean()) {
backwardsCluster().allowOnAllNodes("test");
}
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", randomRealisticUnicodeOfLength(10) + String.valueOf(i)).setSource("field1", English.intToEnglish(i));
}
indexRandom(true, docs);
backwardsCluster().allowOnAllNodes("test");
while(ensureYellow() != ClusterHealthStatus.GREEN) {
backwardsCluster().startNewNode();
}
assertAllShardsOnNodes("test", backwardsCluster().newNodePattern());
CountResponse countResponse = client().prepareCount().get();
assertHitCount(countResponse, numDocs);
final int numIters = randomIntBetween(10, 20);
for (int i = 0; i < numIters; i++) {
countResponse = client().prepareCount().get();
assertHitCount(countResponse, numDocs);
}
}
public void assertAllShardsOnNodes(String index, String pattern) {
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndex())) {
String name = clusterState.nodes().get(shardRouting.currentNodeId()).name();
assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
}
}
}
}
}
/**
* Upgrades a single node to the current version
*/
@Test
public void testIndexUpgradeSingleNode() throws Exception {
assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
}
indexRandom(true, docs);
assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());
client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get();
backwardsCluster().allowOnAllNodes("test");
CountResponse countResponse = client().prepareCount().get();
assertHitCount(countResponse, numDocs);
backwardsCluster().upgradeOneNode();
ensureYellow("test");
if (randomBoolean()) {
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
}
indexRandom(true, docs);
}
client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get();
final int numIters = randomIntBetween(10, 20);
for (int i = 0; i < numIters; i++) {
countResponse = client().prepareCount().get();
assertHitCount(countResponse, numDocs);
}
}
/**
* Test that allocates an index on one or more old nodes and then performs a rolling upgrade:
* one node after another is shut down and restarted with the newer version, and we verify
* that all documents are still around after each node's upgrade.
*/
@Test
public void testIndexRollingUpgrade() throws Exception {
String[] indices = new String[randomIntBetween(1,3)];
for (int i = 0; i < indices.length; i++) {
indices[i] = "test" + i;
assertAcked(prepareCreate(indices[i]).setSettings(ImmutableSettings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
}
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
String[] indexForDoc = new String[docs.length];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex(indexForDoc[i] = RandomPicks.randomFrom(getRandom(), indices), "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
}
indexRandom(true, docs);
for (int i = 0; i < indices.length; i++) {
assertAllShardsOnNodes(indices[i], backwardsCluster().backwardsNodePattern());
}
client().admin().indices().prepareUpdateSettings(indices).setSettings(ImmutableSettings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get();
backwardsCluster().allowOnAllNodes(indices);
logClusterState();
boolean upgraded;
do {
logClusterState();
CountResponse countResponse = client().prepareCount().get();
assertHitCount(countResponse, numDocs);
upgraded = backwardsCluster().upgradeOneNode();
ensureYellow();
countResponse = client().prepareCount().get();
assertHitCount(countResponse, numDocs);
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex(indexForDoc[i], "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
}
indexRandom(true, docs);
} while (upgraded);
client().admin().indices().prepareUpdateSettings(indices).setSettings(ImmutableSettings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get();
CountResponse countResponse = client().prepareCount().get();
assertHitCount(countResponse, numDocs);
}
}


@ -19,6 +19,7 @@
package org.elasticsearch.get;
import org.apache.lucene.util.English;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
@ -33,6 +34,7 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.junit.Test;
@ -156,11 +158,6 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
@Test
public void simpleMultiGetTests() throws Exception {
try {
client().admin().indices().prepareDelete("test").execute().actionGet();
} catch (Exception e) {
// fine
}
client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)).execute().actionGet();
ensureGreen();


@ -0,0 +1,235 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test;
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Iterators;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.google.common.base.Predicate;
import com.google.common.collect.Collections2;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.internal.InternalClient;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.client.FilterClient;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.Random;
/**
* A test cluster implementation that holds a fixed set of external nodes as well as a TestCluster
* which is used to run mixed version clusters in tests like backwards compatibility tests.
* Note: this is an experimental API
*/
public class CompositeTestCluster extends ImmutableTestCluster {
private final TestCluster cluster;
private final ExternalNode[] externalNodes;
private final ExternalClient client = new ExternalClient();
private static final String NODE_PREFIX = "external_";
public CompositeTestCluster(TestCluster cluster, int numExternalNodes, ExternalNode externalNode) throws IOException {
this.cluster = cluster;
this.externalNodes = new ExternalNode[numExternalNodes];
for (int i = 0; i < externalNodes.length; i++) {
externalNodes[i] = externalNode;
}
}
@Override
public synchronized void afterTest() {
cluster.afterTest();
}
@Override
public synchronized void beforeTest(Random random, double transportClientRatio) throws IOException {
super.beforeTest(random, transportClientRatio);
cluster.beforeTest(random, transportClientRatio);
Settings defaultSettings = cluster.getDefaultSettings();
final Client client = cluster.size() > 0 ? cluster.client() : cluster.clientNodeClient();
for (int i = 0; i < externalNodes.length; i++) {
if (!externalNodes[i].running()) {
try {
externalNodes[i] = externalNodes[i].start(client, defaultSettings, NODE_PREFIX + i, cluster.getClusterName());
} catch (InterruptedException e) {
Thread.interrupted();
return;
}
}
externalNodes[i].reset(random.nextLong());
}
}
private Collection<ExternalNode> runningNodes() {
return Collections2.filter(Arrays.asList(externalNodes), new Predicate<ExternalNode>() {
@Override
public boolean apply(ExternalNode input) {
return input.running();
}
});
}
/**
* Upgrades one running external node to a node from the version running the tests. Commonly this is used
* to move from a node with version N-1 to a node running version N. This works seamlessly since they
* share the same data directory. This method returns <tt>true</tt> iff a node was upgraded; if no
* external node is running it returns <tt>false</tt>.
*/
public synchronized boolean upgradeOneNode() throws InterruptedException, IOException {
return upgradeOneNode(ImmutableSettings.EMPTY);
}
/**
* Upgrades one running external node to a node from the version running the tests. Commonly this is used
* to move from a node with version N-1 to a node running version N. This works seamlessly since they
* share the same data directory. This method returns <tt>true</tt> iff a node was upgraded; if no
* external node is running it returns <tt>false</tt>.
*/
public synchronized boolean upgradeOneNode(Settings nodeSettings) throws InterruptedException, IOException {
Collection<ExternalNode> runningNodes = runningNodes();
if (!runningNodes.isEmpty()) {
ExternalNode externalNode = RandomPicks.randomFrom(random, runningNodes);
externalNode.stop();
String s = cluster.startNode(nodeSettings);
ExternalNode.waitForNode(cluster.client(), s);
return true;
}
return false;
}
/**
* Returns a simple pattern that matches all "new" nodes in the cluster.
*/
public String newNodePattern() {
return cluster.nodePrefix() + "*";
}
/**
* Returns a simple pattern that matches all "old" / "backwards" nodes in the cluster.
*/
public String backwardsNodePattern() {
return NODE_PREFIX + "*";
}
/**
* Allows allocation of shards of the given indices on all nodes in the cluster.
*/
public void allowOnAllNodes(String... index) {
Settings build = ImmutableSettings.builder().put("index.routing.allocation.exclude._name", "").build();
client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
}
/**
* Allows allocation of shards of the given indices only on "new" nodes in the cluster.
* Note: if a shard is allocated on an "old" node and can't be allocated on a "new" node it will only be removed once it can
* be allocated on some other "new" node.
*/
public void allowOnlyNewNodes(String... index) {
Settings build = ImmutableSettings.builder().put("index.routing.allocation.exclude._name", backwardsNodePattern()).build();
client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
}
/**
* Starts a current version data node
*/
public void startNewNode() {
cluster.startNode();
}
@Override
public synchronized Client client() {
return client;
}
@Override
public synchronized int size() {
return runningNodes().size() + cluster.size();
}
@Override
public int numDataNodes() {
return runningNodes().size() + cluster.numDataNodes();
}
@Override
public int numBenchNodes() {
return cluster.numBenchNodes();
}
@Override
public InetSocketAddress[] httpAddresses() {
return cluster.httpAddresses();
}
@Override
public void close() throws IOException {
try {
IOUtils.close(externalNodes);
} finally {
IOUtils.close(cluster);
}
}
@Override
public boolean hasFilterCache() {
return true;
}
@Override
public synchronized Iterator<Client> iterator() {
return Iterators.singletonIterator(client());
}
/**
* Returns the number of current version data nodes in the cluster
*/
public int numNewDataNodes() {
return cluster.numDataNodes();
}
/**
* Returns the number of former version data nodes in the cluster
*/
public int numBackwardsDataNodes() {
return runningNodes().size();
}
private synchronized InternalClient internalClient() {
Collection<ExternalNode> externalNodes = runningNodes();
return random.nextBoolean() && !externalNodes.isEmpty() ? (InternalClient) RandomPicks.randomFrom(random, externalNodes).getClient() : (InternalClient) cluster.client();
}
private final class ExternalClient extends FilterClient {
public ExternalClient() {
super(null);
}
@Override
protected InternalClient delegate() {
return internalClient();
}
}
}
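
To make the workflow behind `newNodePattern()`, `allowOnlyNewNodes()` and `upgradeOneNode()` easier to follow, here is a minimal, hypothetical sketch of how a backwards compatibility test typically drives this API (the class and test names are illustrative and not part of this change):

```java
package org.elasticsearch.bwcompat;

import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
import org.junit.Test;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

public class CompositeClusterUsageSketchTests extends ElasticsearchBackwardsCompatIntegrationTest {

    @Test
    public void testShardHandOverAndRollingUpgrade() throws Exception {
        // keep shards off the current-version nodes initially
        assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder()
                .put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern())
                .put(indexSettings())));
        ensureYellow();

        // hand the shards over to the current-version nodes only
        backwardsCluster().allowOnlyNewNodes("test");
        ensureYellow("test");

        // upgrade the remaining external (old-version) nodes one at a time
        while (backwardsCluster().upgradeOneNode()) {
            ensureYellow();
        }
    }
}
```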


@ -0,0 +1,118 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.zen.ZenDiscoveryModule;
import org.elasticsearch.transport.TransportModule;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.netty.NettyTransportModule;
import org.junit.Ignore;
import java.io.File;
import java.io.IOException;
/**
* Abstract base class for backwards compatibility tests. Subclasses of this class
* can run tests against a mixed version cluster. A subset of the nodes in the cluster
* are started in dedicated processes running off a full-fledged elasticsearch release.
* Nodes can be "upgraded" from a "backwards" node to a "new" node, where the "new" node's
* version corresponds to the current version.
* The purpose of this test class is to run tests in scenarios where clusters are in an
* intermediate state during a rolling upgrade as well as in full upgrade situations. The clients
* accessed via #client() are random clients to the nodes in the cluster which might
* execute requests on the "new" as well as the "old" nodes.
* <p>
* Note: this base class is still experimental and might have bugs or leave external processes running behind.
* </p>
* Backwards compatibility tests are disabled by default via {@link BackwardsCompatibilityTest} annotation.
* The following system properties control the test execution:
* <ul>
* <li>
* <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY}</tt> enables / disables
* tests annotated with {@link BackwardsCompatibilityTest} (defaults to
* <tt>false</tt>)
* </li>
* <li>
* <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY_VERSION}</tt>
* sets the version of the external nodes, formatted as <i>X.Y.Z</i>.
* The test class will try to locate a release folder <i>elasticsearch-X.Y.Z</i>
* within the path passed via {@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}.
* </li>
* <li>
* <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}</tt> the path to the
* elasticsearch releases to run backwards compatibility tests against.
* </li>
* </ul>
*
*/
// the transportClientRatio is tricky here since we don't fully control the cluster nodes
@ElasticsearchBackwardsCompatIntegrationTest.BackwardsCompatibilityTest
@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 0, maxNumDataNodes = 2, scope = ElasticsearchIntegrationTest.Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0)
@Ignore
public abstract class ElasticsearchBackwardsCompatIntegrationTest extends ElasticsearchIntegrationTest {
private static File backwardsCompatibilityPath() {
String path = System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_PATH);
String version = System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_VERSION);
if (path == null || path.isEmpty() || version == null || version.isEmpty()) {
throw new IllegalArgumentException("Invalid Backwards tests location path:" + path + " version: " + version);
}
File file = new File(path, "elasticsearch-" + version);
if (!file.isDirectory()) {
throw new IllegalArgumentException("Backwards tests location is not a directory: " + file.getAbsolutePath());
}
return file;
}
public CompositeTestCluster backwardsCluster() {
return (CompositeTestCluster) immutableCluster();
}
protected ImmutableTestCluster buildTestCluster(Scope scope) throws IOException {
ImmutableTestCluster cluster = super.buildTestCluster(scope);
return new CompositeTestCluster((TestCluster) cluster, between(minExternalNodes(), maxExternalNodes()), new ExternalNode(backwardsCompatibilityPath(), randomLong()));
}
protected int minExternalNodes() {
return 1;
}
protected int maxExternalNodes() {
return 2;
}
@Override
protected int maximumNumberOfReplicas() {
return 1;
}
protected Settings nodeSettings(int nodeOrdinal) {
return ImmutableSettings.builder()
.put(TransportModule.TRANSPORT_TYPE_KEY, NettyTransportModule.class) // run same transport / disco as external
.put(DiscoveryModule.DISCOVERY_TYPE_KEY, ZenDiscoveryModule.class)
.put("gateway.type", "local") // we require local gateway to mimic upgrades of nodes
.put("discovery.type", "zen") // zen is needed since we start external nodes
.put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, TransportService.class.getName())
.build();
}
}
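
As a rough illustration of the contract described in the class javadoc above, a subclass can be as small as the following hypothetical sketch; the mixed cluster setup, the `@BackwardsCompatibilityTest` gate and the external node management are all inherited, and the suite only runs when `-Dtests.bwc=true` together with a resolvable release version and path is provided:

```java
package org.elasticsearch.bwcompat;

import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
import org.junit.Test;

import static org.hamcrest.Matchers.is;

public class MinimalBackwardsCompatSketchTests extends ElasticsearchBackwardsCompatIntegrationTest {

    @Test
    public void testIndexAndGetOnMixedCluster() throws Exception {
        createIndex("test");
        ensureYellow();
        // client() may talk to either an "old" external node or a "new" in-process node
        client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
        assertThat(client().prepareGet("test", "type1", "1").get().isExists(), is(true));
    }
}
```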


@ -27,6 +27,7 @@ import com.google.common.base.Predicate;
import com.google.common.collect.Lists;
import org.apache.lucene.store.StoreRateLimiting;
import org.apache.lucene.util.AbstractRandomizedTest;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
@ -100,9 +101,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.TestCluster.clusterName;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.equalTo;
@ -489,11 +488,9 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
return testCluster;
}
private void clearClusters() throws IOException {
private static void clearClusters() throws IOException {
if (!clusters.isEmpty()) {
for (ImmutableTestCluster cluster : clusters.values()) {
cluster.close();
}
IOUtils.close(clusters.values());
clusters.clear();
}
}
@ -595,7 +592,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
}
protected int maximumNumberOfReplicas() {
return immutableCluster().numDataNodes() - 1;
return Math.max(0, immutableCluster().numDataNodes() - 1);
}
protected int numberOfReplicas() {
@ -845,7 +842,15 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
return actionGet.getStatus();
}
private void ensureClusterSizeConsistency() {
/**
* Logs the current cluster state and the pending cluster tasks at debug level.
*/
public void logClusterState() {
logger.debug("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
}
void ensureClusterSizeConsistency() {
logger.trace("Check consistency for [{}] nodes", immutableCluster().size());
assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(immutableCluster().size())).get());
}
@ -1307,7 +1312,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
return ImmutableSettings.EMPTY;
}
private TestCluster buildTestCluster(Scope scope) {
protected ImmutableTestCluster buildTestCluster(Scope scope) throws IOException {
long currentClusterSeed = randomLong();
NodeSettingsSource nodeSettingsSource = new NodeSettingsSource() {
@ -1426,6 +1431,8 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
} finally {
INSTANCE = null;
}
} else {
clearClusters();
}
}


@ -0,0 +1,224 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test;
import com.google.common.base.Predicate;
import org.apache.lucene.util.Constants;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
/**
* Simple helper class to start external nodes to be used within a test cluster
*/
final class ExternalNode implements Closeable {
private final File path;
private final Random random;
private Process process;
private NodeInfo nodeInfo;
private final String clusterName;
private TransportClient client;
ExternalNode(File path, long seed) {
this(path, null, seed);
}
ExternalNode(File path, String clusterName, long seed) {
if (!path.isDirectory()) {
throw new IllegalArgumentException("path must be a directory");
}
this.path = path;
this.clusterName = clusterName;
this.random = new Random(seed);
}
synchronized ExternalNode start(Client localNode, Settings settings, String nodeName, String clusterName) throws IOException, InterruptedException {
ExternalNode externalNode = new ExternalNode(path, clusterName, random.nextLong());
externalNode.startInternal(localNode, settings, nodeName, clusterName);
return externalNode;
}
synchronized void startInternal(Client client, Settings settings, String nodeName, String clusterName) throws IOException, InterruptedException {
if (process != null) {
throw new IllegalStateException("Already started");
}
List<String> params = new ArrayList<>();
if (!Constants.WINDOWS) {
params.add("bin/elasticsearch");
} else {
params.add("bin/elasticsearch.bat");
}
params.add("-Des.cluster.name=" + clusterName);
params.add("-Des.node.name=" + nodeName);
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
switch (entry.getKey()) {
case "cluster.name":
case "node.name":
case "path.home":
case "node.mode":
case "gateway.type":
continue;
default:
params.add("-Des." + entry.getKey() + "=" + entry.getValue());
}
}
params.add("-Des.gateway.type=local");
params.add("-Des.path.home=" + new File("").getAbsolutePath());
ProcessBuilder builder = new ProcessBuilder(params);
builder.directory(path);
builder.inheritIO();
boolean success = false;
try {
process = builder.start();
this.nodeInfo = null;
if (waitForNode(client, nodeName)) {
nodeInfo = nodeInfo(client, nodeName);
assert nodeInfo != null;
} else {
throw new IllegalStateException("Node [" + nodeName + "] didn't join the cluster");
}
success = true;
} finally {
if (!success) {
stop();
}
}
}
static boolean waitForNode(final Client client, final String name) throws InterruptedException {
return ElasticsearchTestCase.awaitBusy(new Predicate<Object>() {
@Override
public boolean apply(java.lang.Object input) {
final NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().get();
final NodeInfo[] nodes = nodeInfos.getNodes();
for (NodeInfo info : nodes) {
if (name.equals(info.getNode().getName())) {
return true;
}
}
return false;
}
});
}
static NodeInfo nodeInfo(final Client client, final String nodeName) {
final NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().get();
final NodeInfo[] nodes = nodeInfos.getNodes();
for (NodeInfo info : nodes) {
if (nodeName.equals(info.getNode().getName())) {
return info;
}
}
return null;
}
synchronized TransportAddress getTransportAddress() {
if (nodeInfo == null) {
throw new IllegalStateException("Node has not started yet");
}
return nodeInfo.getTransport().getAddress().publishAddress();
}
TransportAddress address() {
if (nodeInfo == null) {
throw new IllegalStateException("Node has not started yet");
}
return nodeInfo.getTransport().getAddress().publishAddress();
}
synchronized Client getClient() {
if (nodeInfo == null) {
throw new IllegalStateException("Node has not started yet");
}
if (client == null) {
TransportAddress addr = nodeInfo.getTransport().getAddress().publishAddress();
TransportClient client = new TransportClient(settingsBuilder().put("client.transport.nodes_sampler_interval", "1s")
.put("name", "transport_client_" + nodeInfo.getNode().name())
.put(ClusterName.SETTING, clusterName).put("client.transport.sniff", false).build());
client.addTransportAddress(addr);
this.client = client;
}
return client;
}
synchronized void reset(long seed) {
this.random.setSeed(seed);
}
synchronized void stop() {
if (running()) {
try {
if (nodeInfo != null && random.nextBoolean()) {
// sometimes shut down gracefully
getClient().admin().cluster().prepareNodesShutdown(this.nodeInfo.getNode().id()).setExit(random.nextBoolean()).setDelay("0s").get();
}
if (this.client != null) {
client.close();
}
} finally {
process.destroy();
try {
process.waitFor();
} catch (InterruptedException e) {
Thread.interrupted();
}
process = null;
nodeInfo = null;
}
}
}
synchronized boolean running() {
return process != null;
}
@Override
public void close() {
stop();
}
synchronized String getName() {
if (nodeInfo == null) {
throw new IllegalStateException("Node has not started yet");
}
return nodeInfo.getNode().getName();
}
}
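
For orientation, a hypothetical sketch of the lifecycle that CompositeTestCluster drives for each external node (the helper class below is illustrative only; ExternalNode is package private, so the sketch assumes it lives in the same `org.elasticsearch.test` package and that a release is unpacked under `./backwards`):

```java
package org.elasticsearch.test;

import java.io.File;

import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

class ExternalNodeLifecycleSketch {

    static void runOnce(Client localClient, Settings settings, String clusterName) throws Exception {
        // points at an unpacked release, e.g. ./backwards/elasticsearch-1.2.1
        ExternalNode seed = new ExternalNode(new File("backwards/elasticsearch-1.2.1"), 42L);
        // forks bin/elasticsearch and waits for the node to join the cluster
        ExternalNode node = seed.start(localClient, settings, "external_0", clusterName);
        try {
            // talk to the forked node over its transport address
            node.getClient().admin().cluster().prepareHealth().get();
        } finally {
            node.stop(); // shuts the external process down again
        }
    }
}
```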


@ -32,6 +32,8 @@ import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.indices.IndexTemplateMissingException;
import org.elasticsearch.repositories.RepositoryMissingException;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Random;
@ -43,9 +45,9 @@ import static org.junit.Assert.assertThat;
* Base test cluster that exposes the basis to run tests against any elasticsearch cluster, whose layout
* (e.g. number of nodes) is predefined and cannot be changed during the tests execution
*/
public abstract class ImmutableTestCluster implements Iterable<Client> {
public abstract class ImmutableTestCluster implements Iterable<Client>, Closeable {
private final ESLogger logger = Loggers.getLogger(getClass());
protected final ESLogger logger = Loggers.getLogger(getClass());
protected Random random;
@ -54,7 +56,7 @@ public abstract class ImmutableTestCluster implements Iterable<Client> {
/**
* This method should be executed before each test to reset the cluster to its initial state.
*/
public void beforeTest(Random random, double transportClientRatio) {
public void beforeTest(Random random, double transportClientRatio) throws IOException {
assert transportClientRatio >= 0.0 && transportClientRatio <= 1.0;
logger.debug("Reset test cluster with transport client ratio: [{}]", transportClientRatio);
this.transportClientRatio = transportClientRatio;
@ -113,7 +115,7 @@ public abstract class ImmutableTestCluster implements Iterable<Client> {
/**
* Closes the current cluster
*/
public abstract void close();
public abstract void close() throws IOException;
/**
* Deletes the given indices from the tests cluster. If no index name is passed to this method
@ -202,4 +204,5 @@ public abstract class ImmutableTestCluster implements Iterable<Client> {
* Return whether or not this cluster can cache filters.
*/
public abstract boolean hasFilterCache();
}


@ -81,6 +81,7 @@ import org.junit.Assert;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.*;
import java.util.concurrent.ExecutorService;
@ -125,6 +126,8 @@ public final class TestCluster extends ImmutableTestCluster {
*/
public static final String SETTING_CLUSTER_NODE_SEED = "test.cluster.node.seed";
private static final String NODE_PREFIX = "node_";
private static final boolean ENABLE_MOCK_MODULES = systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true);
static final int DEFAULT_MIN_NUM_DATA_NODES = 2;
@ -225,7 +228,7 @@ public final class TestCluster extends ImmutableTestCluster {
if (numOfDataPaths > 0) {
StringBuilder dataPath = new StringBuilder();
for (int i = 0; i < numOfDataPaths; i++) {
dataPath.append("data/d").append(i).append(',');
dataPath.append(new File("data/d"+i).getAbsolutePath()).append(',');
}
builder.put("path.data", dataPath.toString());
}
@ -273,7 +276,7 @@ public final class TestCluster extends ImmutableTestCluster {
//.put("index.store.type", random.nextInt(10) == 0 ? MockRamIndexStoreModule.class.getName() : MockFSIndexStoreModule.class.getName())
// decrease the routing schedule so new nodes will be added quickly - some random value between 30 and 80 ms
.put("cluster.routing.schedule", (30 + random.nextInt(50)) + "ms")
// default to non gateway
// default to non gateway
.put("gateway.type", "none")
.put(SETTING_CLUSTER_NODE_SEED, seed);
if (ENABLE_MOCK_MODULES && usually(random)) {
@ -462,7 +465,14 @@ public final class TestCluster extends ImmutableTestCluster {
}
private String buildNodeName(int id) {
return "node_" + id;
return NODE_PREFIX + id;
}
/**
* Returns the common node name prefix for this test cluster.
*/
public String nodePrefix() {
return NODE_PREFIX;
}
@Override
@ -518,7 +528,9 @@ public final class TestCluster extends ImmutableTestCluster {
if (randomNodeAndClient != null) {
return randomNodeAndClient.client(random);
}
startNodeClient(ImmutableSettings.EMPTY);
int nodeId = nextNodeId.getAndIncrement();
Settings settings = getSettings(nodeId, random.nextLong(), ImmutableSettings.EMPTY);
startNodeClient(settings);
return getRandomNodeAndClient(new ClientNodePredicate()).client(random);
}
@ -578,7 +590,6 @@ public final class TestCluster extends ImmutableTestCluster {
@Override
public void close() {
ensureOpen();
if (this.open.compareAndSet(true, false)) {
IOUtils.closeWhileHandlingException(nodes.values());
nodes.clear();
@ -698,7 +709,6 @@ public final class TestCluster extends ImmutableTestCluster {
nodeClient = null;
}
node.close();
}
}
@ -751,7 +761,7 @@ public final class TestCluster extends ImmutableTestCluster {
}
@Override
public synchronized void beforeTest(Random random, double transportClientRatio) {
public synchronized void beforeTest(Random random, double transportClientRatio) throws IOException {
super.beforeTest(random, transportClientRatio);
reset(true);
}
@ -1138,7 +1148,12 @@ public final class TestCluster extends ImmutableTestCluster {
public synchronized void startNodeClient(Settings settings) {
ensureOpen(); // currently unused
startNode(settingsBuilder().put(settings).put("node.client", true));
Builder builder = settingsBuilder().put(settings).put("node.client", true).put("node.data", false);
if (size() == 0) {
// if we are the first node - don't wait for a state
builder.put("discovery.initial_state_timeout", 0);
}
startNode(builder);
}
/**
@ -1440,4 +1455,8 @@ public final class TestCluster extends ImmutableTestCluster {
}
}
public Settings getDefaultSettings() {
return defaultSettings;
}
}


@ -0,0 +1,455 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test.client;
import org.elasticsearch.action.*;
import org.elasticsearch.action.bench.*;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.count.CountRequest;
import org.elasticsearch.action.count.CountRequestBuilder;
import org.elasticsearch.action.count.CountResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteRequestBuilder;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest;
import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder;
import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
import org.elasticsearch.action.explain.ExplainRequest;
import org.elasticsearch.action.explain.ExplainRequestBuilder;
import org.elasticsearch.action.explain.ExplainResponse;
import org.elasticsearch.action.get.*;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.mlt.MoreLikeThisRequest;
import org.elasticsearch.action.mlt.MoreLikeThisRequestBuilder;
import org.elasticsearch.action.percolate.*;
import org.elasticsearch.action.search.*;
import org.elasticsearch.action.suggest.SuggestRequest;
import org.elasticsearch.action.suggest.SuggestRequestBuilder;
import org.elasticsearch.action.suggest.SuggestResponse;
import org.elasticsearch.action.termvector.*;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateRequestBuilder;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.AdminClient;
import org.elasticsearch.client.internal.InternalClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
/**
 * A client implementation that forwards all calls to a delegate {@link InternalClient}.
 * Subclasses override {@link #delegate()} to decide which client each call is routed to.
 */
public abstract class FilterClient implements InternalClient {
private final InternalClient in;
public FilterClient(InternalClient in) {
this.in = in;
}
protected InternalClient delegate() {
return this.in;
}
@Override
public void close() {
delegate().close();
}
@Override
public AdminClient admin() {
return delegate().admin();
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) {
return delegate().execute(action, request);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
delegate().execute(action, request, listener);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
Action<Request, Response, RequestBuilder> action) {
return delegate().prepareExecute(action);
}
@Override
public ActionFuture<IndexResponse> index(IndexRequest request) {
return delegate().index(request);
}
@Override
public void index(IndexRequest request, ActionListener<IndexResponse> listener) {
delegate().index(request, listener);
}
@Override
public IndexRequestBuilder prepareIndex() {
return delegate().prepareIndex();
}
@Override
public ActionFuture<UpdateResponse> update(UpdateRequest request) {
return delegate().update(request);
}
@Override
public void update(UpdateRequest request, ActionListener<UpdateResponse> listener) {
delegate().update(request, listener);
}
@Override
public UpdateRequestBuilder prepareUpdate() {
return delegate().prepareUpdate();
}
@Override
public UpdateRequestBuilder prepareUpdate(String index, String type, String id) {
return delegate().prepareUpdate(index, type, id);
}
@Override
public IndexRequestBuilder prepareIndex(String index, String type) {
return delegate().prepareIndex(index, type);
}
@Override
public IndexRequestBuilder prepareIndex(String index, String type, String id) {
return delegate().prepareIndex(index, type, id);
}
@Override
public ActionFuture<DeleteResponse> delete(DeleteRequest request) {
return delegate().delete(request);
}
@Override
public void delete(DeleteRequest request, ActionListener<DeleteResponse> listener) {
delegate().delete(request, listener);
}
@Override
public DeleteRequestBuilder prepareDelete() {
return delegate().prepareDelete();
}
@Override
public DeleteRequestBuilder prepareDelete(String index, String type, String id) {
return delegate().prepareDelete(index, type, id);
}
@Override
public ActionFuture<BulkResponse> bulk(BulkRequest request) {
return delegate().bulk(request);
}
@Override
public void bulk(BulkRequest request, ActionListener<BulkResponse> listener) {
delegate().bulk(request, listener);
}
@Override
public BulkRequestBuilder prepareBulk() {
return delegate().prepareBulk();
}
@Override
public ActionFuture<DeleteByQueryResponse> deleteByQuery(DeleteByQueryRequest request) {
return delegate().deleteByQuery(request);
}
@Override
public void deleteByQuery(DeleteByQueryRequest request, ActionListener<DeleteByQueryResponse> listener) {
delegate().deleteByQuery(request, listener);
}
@Override
public DeleteByQueryRequestBuilder prepareDeleteByQuery(String... indices) {
return delegate().prepareDeleteByQuery(indices);
}
@Override
public ActionFuture<GetResponse> get(GetRequest request) {
return delegate().get(request);
}
@Override
public void get(GetRequest request, ActionListener<GetResponse> listener) {
delegate().get(request, listener);
}
@Override
public GetRequestBuilder prepareGet() {
return delegate().prepareGet();
}
@Override
public GetRequestBuilder prepareGet(String index, String type, String id) {
return delegate().prepareGet(index, type, id);
}
@Override
public ActionFuture<MultiGetResponse> multiGet(MultiGetRequest request) {
return delegate().multiGet(request);
}
@Override
public void multiGet(MultiGetRequest request, ActionListener<MultiGetResponse> listener) {
delegate().multiGet(request, listener);
}
@Override
public MultiGetRequestBuilder prepareMultiGet() {
return delegate().prepareMultiGet();
}
@Override
public ActionFuture<CountResponse> count(CountRequest request) {
return delegate().count(request);
}
@Override
public void count(CountRequest request, ActionListener<CountResponse> listener) {
delegate().count(request, listener);
}
@Override
public CountRequestBuilder prepareCount(String... indices) {
return delegate().prepareCount(indices);
}
@Override
public ActionFuture<SuggestResponse> suggest(SuggestRequest request) {
return delegate().suggest(request);
}
@Override
public void suggest(SuggestRequest request, ActionListener<SuggestResponse> listener) {
delegate().suggest(request, listener);
}
@Override
public SuggestRequestBuilder prepareSuggest(String... indices) {
return delegate().prepareSuggest(indices);
}
@Override
public ActionFuture<SearchResponse> search(SearchRequest request) {
return delegate().search(request);
}
@Override
public void search(SearchRequest request, ActionListener<SearchResponse> listener) {
delegate().search(request, listener);
}
@Override
public SearchRequestBuilder prepareSearch(String... indices) {
return delegate().prepareSearch(indices);
}
@Override
public ActionFuture<SearchResponse> searchScroll(SearchScrollRequest request) {
return delegate().searchScroll(request);
}
@Override
public void searchScroll(SearchScrollRequest request, ActionListener<SearchResponse> listener) {
delegate().searchScroll(request, listener);
}
@Override
public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) {
return delegate().prepareSearchScroll(scrollId);
}
@Override
public ActionFuture<MultiSearchResponse> multiSearch(MultiSearchRequest request) {
return delegate().multiSearch(request);
}
@Override
public void multiSearch(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) {
delegate().multiSearch(request, listener);
}
@Override
public MultiSearchRequestBuilder prepareMultiSearch() {
return delegate().prepareMultiSearch();
}
@Override
public ActionFuture<SearchResponse> moreLikeThis(MoreLikeThisRequest request) {
return delegate().moreLikeThis(request);
}
@Override
public void moreLikeThis(MoreLikeThisRequest request, ActionListener<SearchResponse> listener) {
delegate().moreLikeThis(request, listener);
}
@Override
public MoreLikeThisRequestBuilder prepareMoreLikeThis(String index, String type, String id) {
return delegate().prepareMoreLikeThis(index, type, id);
}
@Override
public ActionFuture<TermVectorResponse> termVector(TermVectorRequest request) {
return delegate().termVector(request);
}
@Override
public void termVector(TermVectorRequest request, ActionListener<TermVectorResponse> listener) {
delegate().termVector(request, listener);
}
@Override
public TermVectorRequestBuilder prepareTermVector(String index, String type, String id) {
return delegate().prepareTermVector(index, type, id);
}
@Override
public ActionFuture<MultiTermVectorsResponse> multiTermVectors(MultiTermVectorsRequest request) {
return delegate().multiTermVectors(request);
}
@Override
public void multiTermVectors(MultiTermVectorsRequest request, ActionListener<MultiTermVectorsResponse> listener) {
delegate().multiTermVectors(request, listener);
}
@Override
public MultiTermVectorsRequestBuilder prepareMultiTermVectors() {
return delegate().prepareMultiTermVectors();
}
@Override
public ActionFuture<PercolateResponse> percolate(PercolateRequest request) {
return delegate().percolate(request);
}
@Override
public void percolate(PercolateRequest request, ActionListener<PercolateResponse> listener) {
delegate().percolate(request, listener);
}
@Override
public PercolateRequestBuilder preparePercolate() {
return delegate().preparePercolate();
}
@Override
public ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request) {
return delegate().multiPercolate(request);
}
@Override
public void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener) {
delegate().multiPercolate(request, listener);
}
@Override
public MultiPercolateRequestBuilder prepareMultiPercolate() {
return delegate().prepareMultiPercolate();
}
@Override
public ExplainRequestBuilder prepareExplain(String index, String type, String id) {
return delegate().prepareExplain(index, type, id);
}
@Override
public ActionFuture<ExplainResponse> explain(ExplainRequest request) {
return delegate().explain(request);
}
@Override
public void explain(ExplainRequest request, ActionListener<ExplainResponse> listener) {
delegate().explain(request, listener);
}
@Override
public ClearScrollRequestBuilder prepareClearScroll() {
return delegate().prepareClearScroll();
}
@Override
public ActionFuture<ClearScrollResponse> clearScroll(ClearScrollRequest request) {
return delegate().clearScroll(request);
}
@Override
public void clearScroll(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener) {
delegate().clearScroll(request, listener);
}
@Override
public void bench(BenchmarkRequest request, ActionListener<BenchmarkResponse> listener) {
delegate().bench(request, listener);
}
@Override
public ActionFuture<BenchmarkResponse> bench(BenchmarkRequest request) {
return delegate().bench(request);
}
@Override
public BenchmarkRequestBuilder prepareBench(String... indices) {
return delegate().prepareBench(indices);
}
@Override
public void abortBench(AbortBenchmarkRequest request, ActionListener<AbortBenchmarkResponse> listener) {
delegate().abortBench(request, listener);
}
@Override
public AbortBenchmarkRequestBuilder prepareAbortBench(String... benchmarkNames) {
return delegate().prepareAbortBench(benchmarkNames);
}
@Override
public void benchStatus(BenchmarkStatusRequest request, ActionListener<BenchmarkStatusResponse> listener) {
delegate().benchStatus(request, listener);
}
@Override
public BenchmarkStatusRequestBuilder prepareBenchStatus() {
return delegate().prepareBenchStatus();
}
@Override
public ThreadPool threadPool() {
return delegate().threadPool();
}
@Override
public Settings settings() {
return delegate().settings();
}
}
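
The class above is pure delegation, so a subclass only has to override the calls it wants to change. The sketch below is illustrative only — the `CountingClient` name and the counter are assumptions, not part of this commit; the `RandomizingClient` diff that follows is the commit's actual use of the pattern.

```
import java.util.concurrent.atomic.AtomicLong;

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.internal.InternalClient;

/** Illustrative sketch: counts prepared searches, delegates everything else. */
public class CountingClient extends FilterClient {

    private final AtomicLong searches = new AtomicLong();

    public CountingClient(InternalClient in) {
        super(in);
    }

    @Override
    public SearchRequestBuilder prepareSearch(String... indices) {
        searches.incrementAndGet(); // record the call, then delegate as usual
        return delegate().prepareSearch(indices);
    }

    public long searchCount() {
        return searches.get();
    }
}
```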


@ -20,55 +20,21 @@
package org.elasticsearch.test.client;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.elasticsearch.action.*;
import org.elasticsearch.action.bench.*;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.count.CountRequest;
import org.elasticsearch.action.count.CountRequestBuilder;
import org.elasticsearch.action.count.CountResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteRequestBuilder;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest;
import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder;
import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse;
import org.elasticsearch.action.explain.ExplainRequest;
import org.elasticsearch.action.explain.ExplainRequestBuilder;
import org.elasticsearch.action.explain.ExplainResponse;
import org.elasticsearch.action.get.*;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.mlt.MoreLikeThisRequest;
import org.elasticsearch.action.mlt.MoreLikeThisRequestBuilder;
import org.elasticsearch.action.percolate.*;
import org.elasticsearch.action.search.*;
import org.elasticsearch.action.suggest.SuggestRequest;
import org.elasticsearch.action.suggest.SuggestRequestBuilder;
import org.elasticsearch.action.suggest.SuggestResponse;
import org.elasticsearch.action.termvector.*;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateRequestBuilder;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.AdminClient;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.internal.InternalClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.Arrays;
import java.util.Random;
/** A {@link Client} that randomizes request parameters. */
public class RandomizingClient implements InternalClient {
public class RandomizingClient extends FilterClient {
private final SearchType defaultSearchType;
private final InternalClient delegate;
public RandomizingClient(InternalClient client, Random random) {
this.delegate = client;
super(client);
// we don't use the QUERY_AND_FETCH types that break quite a lot of tests
// given that they return `size*num_shards` hits instead of `size`
defaultSearchType = RandomPicks.randomFrom(random, Arrays.asList(
@ -76,387 +42,9 @@ public class RandomizingClient implements InternalClient {
SearchType.QUERY_THEN_FETCH));
}
@Override
public void close() {
delegate.close();
}
@Override
public AdminClient admin() {
return delegate.admin();
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) {
return delegate.execute(action, request);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
delegate.execute(action, request, listener);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
Action<Request, Response, RequestBuilder> action) {
return delegate.prepareExecute(action);
}
@Override
public ActionFuture<IndexResponse> index(IndexRequest request) {
return delegate.index(request);
}
@Override
public void index(IndexRequest request, ActionListener<IndexResponse> listener) {
delegate.index(request, listener);
}
@Override
public IndexRequestBuilder prepareIndex() {
return delegate.prepareIndex();
}
@Override
public ActionFuture<UpdateResponse> update(UpdateRequest request) {
return delegate.update(request);
}
@Override
public void update(UpdateRequest request, ActionListener<UpdateResponse> listener) {
delegate.update(request, listener);
}
@Override
public UpdateRequestBuilder prepareUpdate() {
return delegate.prepareUpdate();
}
@Override
public UpdateRequestBuilder prepareUpdate(String index, String type, String id) {
return delegate.prepareUpdate(index, type, id);
}
@Override
public IndexRequestBuilder prepareIndex(String index, String type) {
return delegate.prepareIndex(index, type);
}
@Override
public IndexRequestBuilder prepareIndex(String index, String type, String id) {
return delegate.prepareIndex(index, type, id);
}
@Override
public ActionFuture<DeleteResponse> delete(DeleteRequest request) {
return delegate.delete(request);
}
@Override
public void delete(DeleteRequest request, ActionListener<DeleteResponse> listener) {
delegate.delete(request, listener);
}
@Override
public DeleteRequestBuilder prepareDelete() {
return delegate.prepareDelete();
}
@Override
public DeleteRequestBuilder prepareDelete(String index, String type, String id) {
return delegate.prepareDelete(index, type, id);
}
@Override
public ActionFuture<BulkResponse> bulk(BulkRequest request) {
return delegate.bulk(request);
}
@Override
public void bulk(BulkRequest request, ActionListener<BulkResponse> listener) {
delegate.bulk(request, listener);
}
@Override
public BulkRequestBuilder prepareBulk() {
return delegate.prepareBulk();
}
@Override
public ActionFuture<DeleteByQueryResponse> deleteByQuery(DeleteByQueryRequest request) {
return delegate.deleteByQuery(request);
}
@Override
public void deleteByQuery(DeleteByQueryRequest request, ActionListener<DeleteByQueryResponse> listener) {
delegate.deleteByQuery(request, listener);
}
@Override
public DeleteByQueryRequestBuilder prepareDeleteByQuery(String... indices) {
return delegate.prepareDeleteByQuery(indices);
}
@Override
public ActionFuture<GetResponse> get(GetRequest request) {
return delegate.get(request);
}
@Override
public void get(GetRequest request, ActionListener<GetResponse> listener) {
delegate.get(request, listener);
}
@Override
public GetRequestBuilder prepareGet() {
return delegate.prepareGet();
}
@Override
public GetRequestBuilder prepareGet(String index, String type, String id) {
return delegate.prepareGet(index, type, id);
}
@Override
public ActionFuture<MultiGetResponse> multiGet(MultiGetRequest request) {
return delegate.multiGet(request);
}
@Override
public void multiGet(MultiGetRequest request, ActionListener<MultiGetResponse> listener) {
delegate.multiGet(request, listener);
}
@Override
public MultiGetRequestBuilder prepareMultiGet() {
return delegate.prepareMultiGet();
}
@Override
public ActionFuture<CountResponse> count(CountRequest request) {
return delegate.count(request);
}
@Override
public void count(CountRequest request, ActionListener<CountResponse> listener) {
delegate.count(request, listener);
}
@Override
public CountRequestBuilder prepareCount(String... indices) {
return delegate.prepareCount(indices);
}
@Override
public ActionFuture<SuggestResponse> suggest(SuggestRequest request) {
return delegate.suggest(request);
}
@Override
public void suggest(SuggestRequest request, ActionListener<SuggestResponse> listener) {
delegate.suggest(request, listener);
}
@Override
public SuggestRequestBuilder prepareSuggest(String... indices) {
return delegate.prepareSuggest(indices);
}
@Override
public ActionFuture<SearchResponse> search(SearchRequest request) {
return delegate.search(request);
}
@Override
public void search(SearchRequest request, ActionListener<SearchResponse> listener) {
delegate.search(request, listener);
}
@Override
public SearchRequestBuilder prepareSearch(String... indices) {
return delegate.prepareSearch(indices).setSearchType(defaultSearchType);
}
@Override
public ActionFuture<SearchResponse> searchScroll(SearchScrollRequest request) {
return delegate.searchScroll(request);
}
@Override
public void searchScroll(SearchScrollRequest request, ActionListener<SearchResponse> listener) {
delegate.searchScroll(request, listener);
}
@Override
public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) {
return delegate.prepareSearchScroll(scrollId);
}
@Override
public ActionFuture<MultiSearchResponse> multiSearch(MultiSearchRequest request) {
return delegate.multiSearch(request);
}
@Override
public void multiSearch(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) {
delegate.multiSearch(request, listener);
}
@Override
public MultiSearchRequestBuilder prepareMultiSearch() {
return delegate.prepareMultiSearch();
}
@Override
public ActionFuture<SearchResponse> moreLikeThis(MoreLikeThisRequest request) {
return delegate.moreLikeThis(request);
}
@Override
public void moreLikeThis(MoreLikeThisRequest request, ActionListener<SearchResponse> listener) {
delegate.moreLikeThis(request, listener);
}
@Override
public MoreLikeThisRequestBuilder prepareMoreLikeThis(String index, String type, String id) {
return delegate.prepareMoreLikeThis(index, type, id);
}
@Override
public ActionFuture<TermVectorResponse> termVector(TermVectorRequest request) {
return delegate.termVector(request);
}
@Override
public void termVector(TermVectorRequest request, ActionListener<TermVectorResponse> listener) {
delegate.termVector(request, listener);
}
@Override
public TermVectorRequestBuilder prepareTermVector(String index, String type, String id) {
return delegate.prepareTermVector(index, type, id);
}
@Override
public ActionFuture<MultiTermVectorsResponse> multiTermVectors(MultiTermVectorsRequest request) {
return delegate.multiTermVectors(request);
}
@Override
public void multiTermVectors(MultiTermVectorsRequest request, ActionListener<MultiTermVectorsResponse> listener) {
delegate.multiTermVectors(request, listener);
}
@Override
public MultiTermVectorsRequestBuilder prepareMultiTermVectors() {
return delegate.prepareMultiTermVectors();
}
@Override
public ActionFuture<PercolateResponse> percolate(PercolateRequest request) {
return delegate.percolate(request);
}
@Override
public void percolate(PercolateRequest request, ActionListener<PercolateResponse> listener) {
delegate.percolate(request, listener);
}
@Override
public PercolateRequestBuilder preparePercolate() {
return delegate.preparePercolate();
}
@Override
public ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request) {
return delegate.multiPercolate(request);
}
@Override
public void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener) {
delegate.multiPercolate(request, listener);
}
@Override
public MultiPercolateRequestBuilder prepareMultiPercolate() {
return delegate.prepareMultiPercolate();
}
@Override
public ExplainRequestBuilder prepareExplain(String index, String type, String id) {
return delegate.prepareExplain(index, type, id);
}
@Override
public ActionFuture<ExplainResponse> explain(ExplainRequest request) {
return delegate.explain(request);
}
@Override
public void explain(ExplainRequest request, ActionListener<ExplainResponse> listener) {
delegate.explain(request, listener);
}
@Override
public ClearScrollRequestBuilder prepareClearScroll() {
return delegate.prepareClearScroll();
}
@Override
public ActionFuture<ClearScrollResponse> clearScroll(ClearScrollRequest request) {
return delegate.clearScroll(request);
}
@Override
public void clearScroll(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener) {
delegate.clearScroll(request, listener);
}
@Override
public void bench(BenchmarkRequest request, ActionListener<BenchmarkResponse> listener) {
delegate.bench(request, listener);
}
@Override
public ActionFuture<BenchmarkResponse> bench(BenchmarkRequest request) {
return delegate.bench(request);
}
@Override
public BenchmarkRequestBuilder prepareBench(String... indices) {
return delegate.prepareBench(indices);
}
@Override
public void abortBench(AbortBenchmarkRequest request, ActionListener<AbortBenchmarkResponse> listener) {
delegate.abortBench(request, listener);
}
@Override
public AbortBenchmarkRequestBuilder prepareAbortBench(String... benchmarkNames) {
return delegate.prepareAbortBench(benchmarkNames);
}
@Override
public void benchStatus(BenchmarkStatusRequest request, ActionListener<BenchmarkStatusResponse> listener) {
delegate.benchStatus(request, listener);
}
@Override
public BenchmarkStatusRequestBuilder prepareBenchStatus() {
return delegate.prepareBenchStatus();
}
@Override
public ThreadPool threadPool() {
return delegate.threadPool();
}
@Override
public Settings settings() {
return delegate.settings();
return delegate().prepareSearch(indices).setSearchType(defaultSearchType);
}
@Override


@ -136,7 +136,8 @@ public class ReproduceInfoPrinter extends RunListener {
public ReproduceErrorMessageBuilder appendESProperties() {
appendProperties("es.logger.level", "es.node.mode", "es.node.local", TESTS_CLUSTER, TestCluster.TESTS_ENABLE_MOCK_MODULES,
"tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms", "tests.client.ratio", "tests.heap.size");
"tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms", "tests.client.ratio", "tests.heap.size",
"tests.bwc", "tests.bwc.path", "tests.bwc.version");
if (System.getProperty("tests.jvm.argline") != null && !System.getProperty("tests.jvm.argline").isEmpty()) {
appendOpt("tests.jvm.argline", "\"" + System.getProperty("tests.jvm.argline") + "\"");
}
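
With these properties listed, the reproduce line printed for a failing test also carries the backwards-compatibility settings whenever they are set, so a failing bwc test can be re-run against the same release by copying the printed `tests.bwc`, `tests.bwc.path` and `tests.bwc.version` values.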