Remove legacy BWC test infrastructure and tests (#21915)

We don't use the test infra nor do we run the tests. They might all be
entirely out of date. We also have a different BWC test infra in place.
This change removes all of the legacy infra.
Simon Willnauer 2016-12-02 08:06:20 +01:00 committed by GitHub
parent 7861a9dc3e
commit adf9bd90a4
24 changed files with 14 additions and 2437 deletions


@@ -1,185 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.upgrade;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.upgrade.get.IndexUpgradeStatus;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESBackcompatTestCase;
import org.elasticsearch.test.ESIntegTestCase;
import org.junit.BeforeClass;
import java.util.ArrayList;
import java.util.List;
import static org.elasticsearch.test.OldIndexUtils.assertNotUpgraded;
import static org.elasticsearch.test.OldIndexUtils.assertUpgraded;
import static org.elasticsearch.test.OldIndexUtils.getUpgradeStatus;
import static org.elasticsearch.test.OldIndexUtils.isUpgraded;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) // test scope since we set cluster wide settings
public class UpgradeIT extends ESBackcompatTestCase {
@BeforeClass
public static void checkUpgradeVersion() {
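// the upgrade API only has work to do when the Lucene version changed between the compatibility version and current; otherwise skip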
final boolean luceneVersionMatches = (globalCompatibilityVersion().luceneVersion.major == Version.CURRENT.luceneVersion.major
&& globalCompatibilityVersion().luceneVersion.minor == Version.CURRENT.luceneVersion.minor);
assumeFalse("lucene versions must be different to run upgrade test", luceneVersionMatches);
}
@Override
protected int minExternalNodes() {
return 2;
}
@Override
protected int maximumNumberOfReplicas() {
return Math.max(0, Math.min(backwardsCluster().numBackwardsDataNodes(), backwardsCluster().numNewDataNodes()) - 1);
}
public void testUpgrade() throws Exception {
// allow the cluster to rebalance quickly - 2 concurrent rebalances is the default, we can go higher
Settings.Builder builder = Settings.builder();
builder.put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 100);
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(builder).get();
int numIndexes = randomIntBetween(2, 4);
String[] indexNames = new String[numIndexes];
for (int i = 0; i < numIndexes; ++i) {
final String indexName = "test" + i;
indexNames[i] = indexName;
Settings settings = Settings.builder()
.put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern())
// don't allow any merges so that we can check segments are upgraded
// by the upgrader, and not just regular merging
.put("index.merge.policy.segments_per_tier", 1000000f)
.put(indexSettings())
.build();
assertAcked(prepareCreate(indexName).setSettings(settings));
ensureGreen(indexName);
assertAllShardsOnNodes(indexName, backwardsCluster().backwardsNodePattern());
int numDocs = scaledRandomIntBetween(100, 1000);
List<IndexRequestBuilder> docs = new ArrayList<>();
for (int j = 0; j < numDocs; ++j) {
String id = Integer.toString(j);
docs.add(client().prepareIndex(indexName, "type1", id).setSource("text", "sometext"));
}
indexRandom(true, docs);
ensureGreen(indexName);
assertEquals(0, flush(indexName).getFailedShards());
// index more docs that won't be flushed
numDocs = scaledRandomIntBetween(100, 1000);
docs = new ArrayList<>();
for (int j = 0; j < numDocs; ++j) {
String id = Integer.toString(j);
docs.add(client().prepareIndex(indexName, "type2", id).setSource("text", "someothertext"));
}
indexRandom(true, docs);
ensureGreen(indexName);
}
logger.debug("--> Upgrading nodes");
backwardsCluster().allowOnAllNodes(indexNames);
ensureGreen();
// disable allocation entirely until all nodes are upgraded
builder = Settings.builder();
builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE);
client().admin().cluster().prepareUpdateSettings().setTransientSettings(builder).get();
backwardsCluster().upgradeAllNodes();
builder = Settings.builder();
// disable rebalancing entirely for the time being, otherwise we might get relocations / rebalances from nodes with old segments
builder.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE);
builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.ALL);
client().admin().cluster().prepareUpdateSettings().setTransientSettings(builder).get();
ensureGreen();
logger.info("--> Nodes upgrade complete");
logSegmentsState();
assertNotUpgraded(client());
final String indexToUpgrade = "test" + randomInt(numIndexes - 1);
// This test fires up another node running an older version of ES, but because wire protocol changes across major ES versions, it
// means we can never generate ancient segments in this test (unless Lucene major version bumps but ES major version does not):
assertFalse(hasAncientSegments(client(), indexToUpgrade));
logger.info("--> Running upgrade on index {}", indexToUpgrade);
assertNoFailures(client().admin().indices().prepareUpgrade(indexToUpgrade).get());
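// the upgrade status is reported asynchronously, so poll until the index shows up as fully upgraded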
awaitBusy(() -> {
try {
return isUpgraded(client(), indexToUpgrade);
} catch (Exception e) {
throw ExceptionsHelper.convertToRuntime(e);
}
});
logger.info("--> Single index upgrade complete");
logger.info("--> Running upgrade on the rest of the indexes");
assertNoFailures(client().admin().indices().prepareUpgrade().get());
logSegmentsState();
logger.info("--> Full upgrade complete");
assertUpgraded(client());
}
/** Returns true if there are any ancient segments. */
public static boolean hasAncientSegments(Client client, String index) throws Exception {
for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
if (status.getToUpgradeBytesAncient() != 0) {
return true;
}
}
return false;
}
/** Returns true if there are any old but not ancient segments. */
public static boolean hasOldButNotAncientSegments(Client client, String index) throws Exception {
for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
if (status.getToUpgradeBytes() > status.getToUpgradeBytesAncient()) {
return true;
}
}
return false;
}
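// holder for per-index upgrade byte counts; the ancient bytes are a subset of the total bytes left to upgrade (asserted in the constructor)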
static class UpgradeStatus {
public final String indexName;
public final int totalBytes;
public final int toUpgradeBytes;
public final int toUpgradeBytesAncient;
public UpgradeStatus(String indexName, int totalBytes, int toUpgradeBytes, int toUpgradeBytesAncient) {
this.indexName = indexName;
this.totalBytes = totalBytes;
this.toUpgradeBytes = toUpgradeBytes;
this.toUpgradeBytesAncient = toUpgradeBytesAncient;
assert toUpgradeBytesAncient <= toUpgradeBytes;
}
}
}


@@ -1,121 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bwcompat;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
import org.elasticsearch.test.ESBackcompatTestCase;
import org.elasticsearch.test.ESIntegTestCase;
import java.io.IOException;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.ExecutionException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
@ESIntegTestCase.ClusterScope(numDataNodes = 0, scope = ESIntegTestCase.Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0)
public class BasicAnalysisBackwardCompatibilityIT extends ESBackcompatTestCase {
// This pattern matches characters with Line_Break = Complex_Content.
static final Pattern complexUnicodeChars = Pattern.compile("[\u17B4\u17B5\u17D3\u17CB-\u17D1\u17DD\u1036\u17C6\u1A74\u1038\u17C7\u0E4E\u0E47-\u0E4D\u0EC8-\u0ECD\uAABF\uAAC1\u1037\u17C8-\u17CA\u1A75-\u1A7C\u1AA8-\u1AAB\uAADE\uAADF\u1AA0-\u1AA6\u1AAC\u1AAD\u109E\u109F\uAA77-\uAA79\u0E46\u0EC6\u17D7\u1AA7\uA9E6\uAA70\uAADD\u19DA\u0E01-\u0E3A\u0E40-\u0E45\u0EDE\u0E81\u0E82\u0E84\u0E87\u0E88\u0EAA\u0E8A\u0EDF\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAB\u0EDC\u0EDD\u0EAD-\u0EB9\u0EBB-\u0EBD\u0EC0-\u0EC4\uAA80-\uAABE\uAAC0\uAAC2\uAADB\uAADC\u1000\u1075\u1001\u1076\u1002\u1077\uAA60\uA9E9\u1003\uA9E0\uA9EA\u1004\u105A\u1005\u1078\uAA61\u1006\uA9E1\uAA62\uAA7E\u1007\uAA63\uA9EB\u1079\uAA72\u1008\u105B\uA9E2\uAA64\uA9EC\u1061\uAA7F\u1009\u107A\uAA65\uA9E7\u100A\u100B\uAA66\u100C\uAA67\u100D\uAA68\uA9ED\u100E\uAA69\uA9EE\u100F\u106E\uA9E3\uA9EF\u1010-\u1012\u107B\uA9FB\u1013\uAA6A\uA9FC\u1014\u107C\uAA6B\u105E\u1015\u1016\u107D\u107E\uAA6F\u108E\uA9E8\u1017\u107F\uA9FD\u1018\uA9E4\uA9FE\u1019\u105F\u101A\u103B\u101B\uAA73\uAA7A\u103C\u101C\u1060\u101D\u103D\u1082\u1080\u1050\u1051\u1065\u101E\u103F\uAA6C\u101F\u1081\uAA6D\u103E\uAA6E\uAA71\u1020\uA9FA\u105C\u105D\u106F\u1070\u1066\u1021-\u1026\u1052-\u1055\u1027-\u102A\u102C\u102B\u1083\u1072\u109C\u102D\u1071\u102E\u1033\u102F\u1073\u1074\u1030\u1056-\u1059\u1031\u1084\u1035\u1085\u1032\u109D\u1034\u1062\u1067\u1068\uA9E5\u1086\u1039\u103A\u1063\u1064\u1069-\u106D\u1087\u108B\u1088\u108C\u108D\u1089\u108A\u108F\u109A\u109B\uAA7B-\uAA7D\uAA74-\uAA76\u1780-\u17A2\u17DC\u17A3-\u17B3\u17B6-\u17C5\u17D2\u1950-\u196D\u1970-\u1974\u1980-\u199C\u19DE\u19DF\u199D-\u19AB\u19B0-\u19C9\u1A20-\u1A26\u1A58\u1A59\u1A27-\u1A3B\u1A5A\u1A5B\u1A3C-\u1A46\u1A54\u1A47-\u1A4C\u1A53\u1A6B\u1A55-\u1A57\u1A5C-\u1A5E\u1A4D-\u1A52\u1A61\u1A6C\u1A62-\u1A6A\u1A6E\u1A6F\u1A73\u1A70-\u1A72\u1A6D\u1A60]");
/**
* Simple upgrade test for analyzers to make sure they analyze to the same tokens after upgrade
* TODO we need this for random tokenizers / tokenfilters as well
*/
public void testAnalyzerTokensAfterUpgrade() throws IOException, ExecutionException, InterruptedException {
int numFields = randomIntBetween(PreBuiltAnalyzers.values().length, PreBuiltAnalyzers.values().length * 10);
String[] fields = new String[numFields * 2];
int fieldId = 0;
for (int i = 0; i < fields.length; i++) {
fields[i++] = "field_" + fieldId++;
String analyzer = randomAnalyzer();
fields[i] = "type=text,analyzer=" + analyzer;
}
assertAcked(prepareCreate("test")
.addMapping("type", (Object[])fields)
.setSettings(indexSettings()));
InputOutput[] inout = new InputOutput[numFields];
for (int i = 0; i < numFields; i++) {
String input;
Matcher matcher;
do {
// In Lucene 4.10, a bug was fixed in StandardTokenizer which was causing breaks on complex characters.
// The bug was fixed without backcompat Version handling, so testing between >=4.10 vs <= 4.9 can
// cause differences when the random string generated contains these complex characters. To mitigate
// the problem, we skip any strings containing these characters.
// TODO: only skip strings containing complex chars when comparing against ES <= 1.3.x
input = TestUtil.randomAnalysisString(random(), 100, false);
matcher = complexUnicodeChars.matcher(input);
} while (matcher.find());
AnalyzeResponse test = client().admin().indices().prepareAnalyze("test", input).setField("field_" + i).get();
inout[i] = new InputOutput(test, input, "field_" + i);
}
logClusterState();
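// rolling upgrade: restart one backwards node at a time on the current version; upgradeOneNode() returns false once no old nodes are left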
boolean upgraded;
do {
logClusterState();
upgraded = backwardsCluster().upgradeOneNode();
ensureYellow();
} while (upgraded);
for (int i = 0; i < inout.length; i++) {
InputOutput inputOutput = inout[i];
AnalyzeResponse test = client().admin().indices().prepareAnalyze("test", inputOutput.input).setField(inputOutput.field).get();
List<AnalyzeResponse.AnalyzeToken> tokens = test.getTokens();
List<AnalyzeResponse.AnalyzeToken> expectedTokens = inputOutput.response.getTokens();
assertThat("size mismatch field: " + fields[i*2] + " analyzer: " + fields[i*2 + 1] + " input: " + BaseTokenStreamTestCase.escape(inputOutput.input), expectedTokens.size(), equalTo(tokens.size()));
for (int j = 0; j < tokens.size(); j++) {
String msg = "failed for term: " + expectedTokens.get(j).getTerm() + " field: " + fields[i*2] + " analyzer: " + fields[i*2 + 1] + " input: " + BaseTokenStreamTestCase.escape(inputOutput.input);
assertThat(msg, BaseTokenStreamTestCase.escape(expectedTokens.get(j).getTerm()), equalTo(BaseTokenStreamTestCase.escape(tokens.get(j).getTerm())));
assertThat(msg, expectedTokens.get(j).getPosition(), equalTo(tokens.get(j).getPosition()));
assertThat(msg, expectedTokens.get(j).getStartOffset(), equalTo(tokens.get(j).getStartOffset()));
assertThat(msg, expectedTokens.get(j).getEndOffset(), equalTo(tokens.get(j).getEndOffset()));
assertThat(msg, expectedTokens.get(j).getType(), equalTo(tokens.get(j).getType()));
}
}
}
private String randomAnalyzer() {
PreBuiltAnalyzers preBuiltAnalyzers = RandomPicks.randomFrom(random(), PreBuiltAnalyzers.values());
return preBuiltAnalyzers.name().toLowerCase(Locale.ROOT);
}
private static final class InputOutput {
final AnalyzeResponse response;
final String input;
final String field;
public InputOutput(AnalyzeResponse response, String input, String field) {
this.response = response;
this.input = input;
this.field = field;
}
}
}


@@ -1,670 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bwcompat;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.lucene.index.Fields;
import org.apache.lucene.util.English;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.explain.ExplainResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.MultiGetItemResponse;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.get.MultiGetRequestBuilder;
import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.termvectors.TermVectorsResponse;
import org.elasticsearch.action.update.UpdateRequestBuilder;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESBackcompatTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.existsQuery;
import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.notNullValue;
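// Each test here runs against a mixed-version cluster containing both old ("backwards") and current-version nodes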
public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase {
/**
* Basic test using Index &amp; Realtime Get with external versioning. This test ensures routing works correctly across versions.
*/
public void testExternalVersion() throws Exception {
createIndex("test");
final boolean routing = randomBoolean();
int numDocs = randomIntBetween(10, 20);
for (int i = 0; i < numDocs; i++) {
String id = Integer.toString(i);
String routingKey = routing ? randomRealisticUnicodeOfLength(10) : null;
final long version = randomIntBetween(0, Integer.MAX_VALUE);
client().prepareIndex("test", "type1", id).setRouting(routingKey).setVersion(version).setVersionType(VersionType.EXTERNAL).setSource("field1", English.intToEnglish(i)).get();
GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(version).get();
assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(version));
final long nextVersion = version + randomIntBetween(0, Integer.MAX_VALUE);
client().prepareIndex("test", "type1", id).setRouting(routingKey).setVersion(nextVersion).setVersionType(VersionType.EXTERNAL).setSource("field1", English.intToEnglish(i)).get();
get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(nextVersion).get();
assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(nextVersion));
}
}
/**
* Basic test using Index &amp; Realtime Get with internal versioning. This test ensures routing works correctly across versions.
*/
public void testInternalVersion() throws Exception {
createIndex("test");
final boolean routing = randomBoolean();
int numDocs = randomIntBetween(10, 20);
for (int i = 0; i < numDocs; i++) {
String routingKey = routing ? randomRealisticUnicodeOfLength(10) : null;
String id = Integer.toString(i);
assertEquals(id, DocWriteResponse.Result.CREATED, client().prepareIndex("test", "type1", id)
.setRouting(routingKey).setSource("field1", English.intToEnglish(i)).get().getResult());
GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(1).get();
assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(1L));
client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).execute().actionGet();
get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(2).get();
assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(2L));
}
assertVersionCreated(compatibilityVersion(), "test");
}
/**
* Very basic bw compat test with a mixed version cluster: random indexing and lookup by ID via term query
*/
public void testIndexAndSearch() throws Exception {
createIndex("test");
int numDocs = randomIntBetween(10, 20);
List<IndexRequestBuilder> builder = new ArrayList<>();
for (int i = 0; i < numDocs; i++) {
String id = Integer.toString(i);
builder.add(client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i), "the_id", id));
}
indexRandom(true, builder);
for (int i = 0; i < numDocs; i++) {
String id = Integer.toString(i);
assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
}
assertVersionCreated(compatibilityVersion(), "test");
}
public void testRecoverFromPreviousVersion() throws ExecutionException, InterruptedException {
if (backwardsCluster().numNewDataNodes() == 0) {
backwardsCluster().startNewNode();
}
assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());
int numDocs = randomIntBetween(100, 150);
ArrayList<String> ids = new ArrayList<>();
logger.info(" --> indexing [{}] docs", numDocs);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
String id = randomRealisticUnicodeOfLength(10) + String.valueOf(i);
ids.add(id);
docs[i] = client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i));
}
indexRandom(true, docs);
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
if (randomBoolean()) {
logger.info(" --> moving index to new nodes");
backwardsCluster().allowOnlyNewNodes("test");
} else {
logger.info(" --> allow index to on all nodes");
backwardsCluster().allowOnAllNodes("test");
}
logger.info(" --> indexing [{}] more docs", numDocs);
// sometimes index while relocating
if (randomBoolean()) {
for (int i = 0; i < numDocs; i++) {
String id = randomRealisticUnicodeOfLength(10) + String.valueOf(numDocs + i);
ids.add(id);
docs[i] = client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(numDocs + i));
}
indexRandom(true, docs);
numDocs *= 2;
}
logger.info(" --> waiting for relocation of [{}] docs to complete", numDocs);
ensureYellow("test"); // move all shards to the new node (it waits on relocation)
final int numIters = randomIntBetween(10, 20);
for (int i = 0; i < numIters; i++) {
assertSearchHits(client().prepareSearch().setSize(ids.size()).get(), ids.toArray(new String[ids.size()]));
}
assertVersionCreated(compatibilityVersion(), "test");
}
/**
* Test that ensures that we will never recover from a newer to an older version (we are not forward compatible)
*/
public void testNoRecoveryFromNewNodes() throws ExecutionException, InterruptedException {
assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().backwardsNodePattern()).put(indexSettings())));
if (backwardsCluster().numNewDataNodes() == 0) {
backwardsCluster().startNewNode();
}
ensureYellow();
assertAllShardsOnNodes("test", backwardsCluster().newNodePattern());
if (randomBoolean()) {
backwardsCluster().allowOnAllNodes("test");
}
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", randomRealisticUnicodeOfLength(10) + String.valueOf(i)).setSource("field1", English.intToEnglish(i), "num_int", randomInt(), "num_double", randomDouble());
}
indexRandom(true, docs);
backwardsCluster().allowOnAllNodes("test");
while (ensureYellow() != ClusterHealthStatus.GREEN) {
backwardsCluster().startNewNode();
}
assertAllShardsOnNodes("test", backwardsCluster().newNodePattern());
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
final int numIters = randomIntBetween(10, 20);
for (int i = 0; i < numIters; i++) {
countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
assertSimpleSort("num_double", "num_int");
}
assertVersionCreated(compatibilityVersion(), "test");
}
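// verifies that hits come back in non-decreasing order when sorting ascending on each of the given numeric fields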
public void assertSimpleSort(String... numericFields) {
for (String field : numericFields) {
SearchResponse searchResponse = client().prepareSearch().addSort(field, SortOrder.ASC).get();
SearchHit[] hits = searchResponse.getHits().getHits();
assertThat(hits.length, greaterThan(0));
Number previous = null;
for (SearchHit hit : hits) {
assertNotNull(hit.getSource().get(field));
if (previous != null) {
assertThat(previous.doubleValue(), lessThanOrEqualTo(((Number) hit.getSource().get(field)).doubleValue()));
}
previous = (Number) hit.getSource().get(field);
}
}
}
@Override
public void assertAllShardsOnNodes(String index, String pattern) {
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndexName())) {
String name = clusterState.nodes().get(shardRouting.currentNodeId()).getName();
assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
}
}
}
}
}
/**
* Upgrades a single node to the current version
*/
public void testIndexUpgradeSingleNode() throws Exception {
assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i), "num_int", randomInt(), "num_double", randomDouble());
}
indexRandom(true, docs);
assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());
disableAllocation("test");
backwardsCluster().allowOnAllNodes("test");
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
backwardsCluster().upgradeOneNode();
ensureYellow();
if (randomBoolean()) {
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i), "num_int", randomInt(), "num_double", randomDouble());
}
indexRandom(true, docs);
}
enableAllocation("test");
ensureYellow();
final int numIters = randomIntBetween(1, 20);
for (int i = 0; i < numIters; i++) {
assertHitCount(client().prepareSearch().setSize(0).get(), numDocs);
assertSimpleSort("num_double", "num_int");
}
assertVersionCreated(compatibilityVersion(), "test");
}
/**
* Test that allocates an index on one or more old nodes and then does a rolling upgrade:
* one node after another is shut down and restarted with a newer version, and we verify
* that all documents are still around after each node's upgrade.
*/
public void testIndexRollingUpgrade() throws Exception {
String[] indices = new String[randomIntBetween(1, 3)];
for (int i = 0; i < indices.length; i++) {
indices[i] = "test" + i;
assertAcked(prepareCreate(indices[i]).setSettings(Settings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
}
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
String[] indexForDoc = new String[docs.length];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex(indexForDoc[i] = RandomPicks.randomFrom(random(), indices), "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i), "num_int", randomInt(), "num_double", randomDouble());
}
indexRandom(true, docs);
for (String index : indices) {
assertAllShardsOnNodes(index, backwardsCluster().backwardsNodePattern());
}
disableAllocation(indices);
backwardsCluster().allowOnAllNodes(indices);
logClusterState();
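// upgrade one node at a time, re-checking doc counts and sort order and re-indexing all docs after each restart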
boolean upgraded;
do {
logClusterState();
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
assertSimpleSort("num_double", "num_int");
upgraded = backwardsCluster().upgradeOneNode();
ensureYellow();
countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex(indexForDoc[i], "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i), "num_int", randomInt(), "num_double", randomDouble());
}
indexRandom(true, docs);
} while (upgraded);
enableAllocation(indices);
ensureYellow();
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
assertSimpleSort("num_double", "num_int");
String[] newIndices = new String[randomIntBetween(1, 3)];
for (int i = 0; i < newIndices.length; i++) {
newIndices[i] = "new_index" + i;
createIndex(newIndices[i]);
}
assertVersionCreated(Version.CURRENT, newIndices); // new indices are all created with the new version
assertVersionCreated(compatibilityVersion(), indices);
}
public void assertVersionCreated(Version version, String... indices) {
GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(indices).get();
ImmutableOpenMap<String, Settings> indexToSettings = getSettingsResponse.getIndexToSettings();
for (String index : indices) {
Settings settings = indexToSettings.get(index);
assertThat(settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null), notNullValue());
assertThat(settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null), equalTo(version));
}
}
public void testUnsupportedFeatures() throws IOException {
XContentBuilder mapping = XContentBuilder.builder(JsonXContent.jsonXContent)
.startObject()
.startObject("type")
.startObject(FieldNamesFieldMapper.NAME)
// by randomly setting index to no we also test the pre-1.3 behavior
.field("index", randomFrom("no", "not_analyzed"))
.field("store", randomFrom("no", "yes"))
.endObject()
.endObject()
.endObject();
try {
assertAcked(prepareCreate("test").
setSettings(Settings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings()))
.addMapping("type", mapping));
} catch (MapperParsingException ex) {
assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class));
assertThat(ExceptionsHelper.detailedMessage(ex).contains("type=_field_names is not supported on indices created before version 1.3.0"), equalTo(true));
}
}
/**
* This filter had a major upgrade in 1.3 where we started to index the field names. Let's see if they still work as expected...
* this test is basically copied from SimpleQueryTests...
*/
public void testExistsFilter() throws IOException, ExecutionException, InterruptedException {
int indexId = 0;
String indexName;
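// create a fresh index and verify exists/missing queries at every mixed-version step, upgrading one node per iteration until the cluster is fully upgraded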
for (; ; ) {
indexName = "test_"+indexId++;
createIndex(indexName);
indexRandom(true,
client().prepareIndex(indexName, "type1", "1").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x1", "x_1").field("field1", "value1_1").field("field2", "value2_1").endObject()),
client().prepareIndex(indexName, "type1", "2").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x2", "x_2").field("field1", "value1_2").endObject()),
client().prepareIndex(indexName, "type1", "3").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y1", "y_1").field("field2", "value2_3").endObject()),
client().prepareIndex(indexName, "type1", "4").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y2", "y_2").field("field3", "value3_4").endObject()));
SearchResponse countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("field1")).get();
assertHitCount(countResponse, 2L);
countResponse = client().prepareSearch().setSize(0).setQuery(constantScoreQuery(existsQuery("field1"))).get();
assertHitCount(countResponse, 2L);
countResponse = client().prepareSearch().setSize(0).setQuery(queryStringQuery("_exists_:field1")).get();
assertHitCount(countResponse, 2L);
countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("field2")).get();
assertHitCount(countResponse, 2L);
countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("field3")).get();
assertHitCount(countResponse, 1L);
// wildcard check
countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("x*")).get();
assertHitCount(countResponse, 2L);
// object check
countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("obj1")).get();
assertHitCount(countResponse, 2L);
countResponse = client().prepareSearch().setSize(0).setQuery(queryStringQuery("_missing_:field1")).get();
assertHitCount(countResponse, 2L);
if (!backwardsCluster().upgradeOneNode()) {
break;
}
ensureYellow();
assertVersionCreated(compatibilityVersion(), indexName); // we had an old node in the cluster so we have to be on the compat version
assertAcked(client().admin().indices().prepareDelete(indexName));
}
assertVersionCreated(Version.CURRENT, indexName); // after upgrade we have current version
}
public Version getMasterVersion() {
return client().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getVersion();
}
public void testDeleteRoutingRequired() throws ExecutionException, InterruptedException, IOException {
createIndexWithAlias();
assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource(
XContentFactory.jsonBuilder().startObject().startObject("test").startObject("_routing").field("required", true).endObject().endObject().endObject()));
ensureYellow("test");
int numDocs = iterations(10, 50);
IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs - 2; i++) {
indexRequestBuilders[i] = client().prepareIndex("test", "test", Integer.toString(i))
.setRouting(randomAsciiOfLength(randomIntBetween(1, 10))).setSource("field", "value");
}
String firstDocId = Integer.toString(numDocs - 2);
indexRequestBuilders[numDocs - 2] = client().prepareIndex("test", "test", firstDocId)
.setRouting("routing").setSource("field", "value");
String secondDocId = Integer.toString(numDocs - 1);
String secondRouting = randomAsciiOfLength(randomIntBetween(1, 10));
indexRequestBuilders[numDocs - 1] = client().prepareIndex("test", "test", secondDocId)
.setRouting(secondRouting).setSource("field", "value");
indexRandom(true, indexRequestBuilders);
SearchResponse searchResponse = client().prepareSearch("test").get();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().totalHits(), equalTo((long) numDocs));
DeleteResponse deleteResponse = client().prepareDelete("test", "test", firstDocId).setRouting("routing").get();
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
GetResponse getResponse = client().prepareGet("test", "test", firstDocId).setRouting("routing").get();
assertThat(getResponse.isExists(), equalTo(false));
refresh();
searchResponse = client().prepareSearch("test").get();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().totalHits(), equalTo((long) numDocs - 1));
}
public void testIndexGetAndDelete() throws ExecutionException, InterruptedException {
createIndexWithAlias();
int numDocs = iterations(10, 50);
for (int i = 0; i < numDocs; i++) {
IndexResponse indexResponse = client().prepareIndex(indexOrAlias(), "type", Integer.toString(i)).setSource("field", "value-" + i).get();
assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
assertThat(indexResponse.getIndex(), equalTo("test"));
assertThat(indexResponse.getType(), equalTo("type"));
assertThat(indexResponse.getId(), equalTo(Integer.toString(i)));
}
refresh();
String docId = Integer.toString(randomIntBetween(0, numDocs - 1));
GetResponse getResponse = client().prepareGet(indexOrAlias(), "type", docId).get();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getIndex(), equalTo("test"));
assertThat(getResponse.getType(), equalTo("type"));
assertThat(getResponse.getId(), equalTo(docId));
DeleteResponse deleteResponse = client().prepareDelete(indexOrAlias(), "type", docId).get();
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
assertThat(deleteResponse.getIndex(), equalTo("test"));
assertThat(deleteResponse.getType(), equalTo("type"));
assertThat(deleteResponse.getId(), equalTo(docId));
getResponse = client().prepareGet(indexOrAlias(), "type", docId).get();
assertThat(getResponse.isExists(), equalTo(false));
refresh();
SearchResponse searchResponse = client().prepareSearch(indexOrAlias()).get();
assertThat(searchResponse.getHits().totalHits(), equalTo((long) numDocs - 1));
}
public void testUpdate() {
createIndexWithAlias();
UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "type1", "1")
.setUpsert("field1", "value1").setDoc("field2", "value2");
UpdateResponse updateResponse = updateRequestBuilder.get();
assertThat(updateResponse.getIndex(), equalTo("test"));
assertThat(updateResponse.getType(), equalTo("type1"));
assertThat(updateResponse.getId(), equalTo("1"));
assertEquals(DocWriteResponse.Result.CREATED, updateResponse.getResult());
GetResponse getResponse = client().prepareGet("test", "type1", "1").get();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getSourceAsMap().containsKey("field1"), equalTo(true));
assertThat(getResponse.getSourceAsMap().containsKey("field2"), equalTo(false));
updateResponse = updateRequestBuilder.get();
assertThat(updateResponse.getIndex(), equalTo("test"));
assertThat(updateResponse.getType(), equalTo("type1"));
assertThat(updateResponse.getId(), equalTo("1"));
assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
getResponse = client().prepareGet("test", "type1", "1").get();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getSourceAsMap().containsKey("field1"), equalTo(true));
assertThat(getResponse.getSourceAsMap().containsKey("field2"), equalTo(true));
}
public void testAnalyze() {
createIndexWithAlias();
assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=text,analyzer=keyword"));
AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("this is a test").setIndex(indexOrAlias()).setField("field").get();
assertThat(analyzeResponse.getTokens().size(), equalTo(1));
assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("this is a test"));
}
public void testExplain() {
createIndexWithAlias();
client().prepareIndex(indexOrAlias(), "test", "1").setSource("field", "value1").get();
refresh();
ExplainResponse response = client().prepareExplain(indexOrAlias(), "test", "1")
.setQuery(QueryBuilders.termQuery("field", "value1")).get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.isMatch(), equalTo(true));
assertThat(response.getExplanation(), notNullValue());
assertThat(response.getExplanation().isMatch(), equalTo(true));
assertThat(response.getExplanation().getDetails().length, equalTo(1));
}
public void testGetTermVector() throws IOException {
createIndexWithAlias();
assertAcked(client().admin().indices().preparePutMapping("test").setType("type1").setSource("field", "type=text,term_vector=with_positions_offsets_payloads").get());
client().prepareIndex(indexOrAlias(), "type1", "1")
.setSource("field", "the quick brown fox jumps over the lazy dog").get();
refresh();
TermVectorsResponse termVectorsResponse = client().prepareTermVectors(indexOrAlias(), "type1", "1").get();
assertThat(termVectorsResponse.getIndex(), equalTo("test"));
assertThat(termVectorsResponse.isExists(), equalTo(true));
Fields fields = termVectorsResponse.getFields();
assertThat(fields.size(), equalTo(1));
assertThat(fields.terms("field").size(), equalTo(8L));
}
public void testIndicesStats() {
createIndex("test");
IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().all().get();
assertThat(indicesStatsResponse.getIndices().size(), equalTo(1));
assertThat(indicesStatsResponse.getIndices().containsKey("test"), equalTo(true));
}
public void testMultiGet() throws ExecutionException, InterruptedException {
createIndexWithAlias();
int numDocs = iterations(10, 50);
IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
indexRequestBuilders[i] = client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + Integer.toString(i));
}
indexRandom(false, indexRequestBuilders);
int iterations = iterations(1, numDocs);
MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet();
for (int i = 0; i < iterations; i++) {
multiGetRequestBuilder.add(new MultiGetRequest.Item(indexOrAlias(), "type", Integer.toString(randomInt(numDocs - 1))));
}
MultiGetResponse multiGetResponse = multiGetRequestBuilder.get();
assertThat(multiGetResponse.getResponses().length, equalTo(iterations));
for (int i = 0; i < multiGetResponse.getResponses().length; i++) {
MultiGetItemResponse multiGetItemResponse = multiGetResponse.getResponses()[i];
assertThat(multiGetItemResponse.isFailed(), equalTo(false));
assertThat(multiGetItemResponse.getIndex(), equalTo("test"));
assertThat(multiGetItemResponse.getType(), equalTo("type"));
assertThat(multiGetItemResponse.getId(), equalTo(multiGetRequestBuilder.request().getItems().get(i).id()));
assertThat(multiGetItemResponse.getResponse().isExists(), equalTo(true));
assertThat(multiGetItemResponse.getResponse().getIndex(), equalTo("test"));
assertThat(multiGetItemResponse.getResponse().getType(), equalTo("type"));
assertThat(multiGetItemResponse.getResponse().getId(), equalTo(multiGetRequestBuilder.request().getItems().get(i).id()));
}
}
public void testScroll() throws ExecutionException, InterruptedException {
createIndex("test");
int numDocs = iterations(10, 100);
IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
indexRequestBuilders[i] = client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + Integer.toString(i));
}
indexRandom(true, indexRequestBuilders);
int size = randomIntBetween(1, 10);
SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test").setScroll("1m").setSize(size);
SearchResponse searchResponse = searchRequestBuilder.get();
assertThat(searchResponse.getScrollId(), notNullValue());
assertHitCount(searchResponse, numDocs);
int hits = 0;
assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
hits += searchResponse.getHits().getHits().length;
try {
do {
searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll("1m").get();
assertThat(searchResponse.getScrollId(), notNullValue());
assertHitCount(searchResponse, numDocs);
hits += searchResponse.getHits().getHits().length;
} while (searchResponse.getHits().getHits().length > 0);
assertThat(hits, equalTo(numDocs));
} finally {
clearScroll(searchResponse.getScrollId());
}
}
private static String indexOrAlias() {
return randomBoolean() ? "test" : "alias";
}
private void createIndexWithAlias() {
assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
}
}


@@ -1,106 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bwcompat;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.service.ClusterStateStatus;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESBackcompatTestCase;
import org.elasticsearch.transport.MockTransportClient;
import java.util.HashMap;
import java.util.Map;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_METADATA;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.hamcrest.Matchers.equalTo;
public class ClusterStateBackwardsCompatIT extends ESBackcompatTestCase {
public void testClusterState() throws Exception {
createIndex("test");
// connect to each node with a custom TransportClient, issue a ClusterStateRequest to test serialization
for (NodeInfo n : clusterNodes().getNodes()) {
try (TransportClient tc = newTransportClient()) {
tc.addTransportAddress(n.getNode().getAddress());
ClusterStateResponse response = tc.admin().cluster().prepareState().execute().actionGet();
assertNotNull(response.getClusterName());
assertTrue(response.getState().getMetaData().hasIndex("test"));
}
}
}
public void testClusterStateWithBlocks() {
createIndex("test-blocks");
Map<String, ClusterBlock> blocks = new HashMap<>();
blocks.put(SETTING_BLOCKS_READ, IndexMetaData.INDEX_READ_BLOCK);
blocks.put(SETTING_BLOCKS_WRITE, IndexMetaData.INDEX_WRITE_BLOCK);
blocks.put(SETTING_BLOCKS_METADATA, IndexMetaData.INDEX_METADATA_BLOCK);
for (Map.Entry<String, ClusterBlock> block : blocks.entrySet()) {
try {
enableIndexBlock("test-blocks", block.getKey());
for (NodeInfo n : clusterNodes().getNodes()) {
try (TransportClient tc = newTransportClient()) {
tc.addTransportAddress(n.getNode().getAddress());
ClusterStateResponse response = tc.admin().cluster().prepareState().setIndices("test-blocks")
.setBlocks(true).setNodes(false).execute().actionGet();
ClusterBlocks clusterBlocks = response.getState().blocks();
assertNotNull(clusterBlocks);
assertTrue(clusterBlocks.hasIndexBlock("test-blocks", block.getValue()));
for (ClusterBlockLevel level : block.getValue().levels()) {
assertTrue(clusterBlocks.indexBlocked(level, "test-blocks"));
}
IndexMetaData indexMetaData = response.getState().getMetaData().getIndices().get("test-blocks");
assertNotNull(indexMetaData);
assertTrue(indexMetaData.getSettings().getAsBoolean(block.getKey(), null));
}
}
} finally {
disableIndexBlock("test-blocks", block.getKey());
}
}
}
private NodesInfoResponse clusterNodes() {
return client().admin().cluster().prepareNodesInfo().execute().actionGet();
}
private TransportClient newTransportClient() {
Settings settings = Settings.builder().put("client.transport.ignore_cluster_name", true)
.put("node.name", "transport_client_" + getTestName()).build();
return new MockTransportClient(settings);
}
}


@@ -1,90 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bwcompat;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESBackcompatTestCase;
import java.util.List;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
public class GetIndexBackwardsCompatibilityIT extends ESBackcompatTestCase {
public void testGetAliases() throws Exception {
CreateIndexResponse createIndexResponse = prepareCreate("test").addAlias(new Alias("testAlias")).execute().actionGet();
assertAcked(createIndexResponse);
GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.ALIASES)
.execute().actionGet();
ImmutableOpenMap<String, List<AliasMetaData>> aliasesMap = getIndexResponse.aliases();
assertThat(aliasesMap, notNullValue());
assertThat(aliasesMap.size(), equalTo(1));
List<AliasMetaData> aliasesList = aliasesMap.get("test");
assertThat(aliasesList, notNullValue());
assertThat(aliasesList.size(), equalTo(1));
AliasMetaData alias = aliasesList.get(0);
assertThat(alias, notNullValue());
assertThat(alias.alias(), equalTo("testAlias"));
}
public void testGetMappings() throws Exception {
CreateIndexResponse createIndexResponse = prepareCreate("test").addMapping("type1", "{\"type1\":{}}").execute().actionGet();
assertAcked(createIndexResponse);
GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.MAPPINGS)
.execute().actionGet();
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = getIndexResponse.mappings();
assertThat(mappings, notNullValue());
assertThat(mappings.size(), equalTo(1));
ImmutableOpenMap<String, MappingMetaData> indexMappings = mappings.get("test");
assertThat(indexMappings, notNullValue());
assertThat(indexMappings.size(), anyOf(equalTo(1), equalTo(2)));
if (indexMappings.size() == 2) {
MappingMetaData mapping = indexMappings.get("_default_");
assertThat(mapping, notNullValue());
}
MappingMetaData mapping = indexMappings.get("type1");
assertThat(mapping, notNullValue());
assertThat(mapping.type(), equalTo("type1"));
}
public void testGetSettings() throws Exception {
CreateIndexResponse createIndexResponse = prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1)).execute().actionGet();
assertAcked(createIndexResponse);
GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.SETTINGS)
.execute().actionGet();
ImmutableOpenMap<String, Settings> settingsMap = getIndexResponse.settings();
assertThat(settingsMap, notNullValue());
assertThat(settingsMap.size(), equalTo(1));
Settings settings = settingsMap.get("test");
assertThat(settings, notNullValue());
assertThat(settings.get("index.number_of_shards"), equalTo("1"));
}
}


@@ -1,87 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bwcompat;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESBackcompatTestCase;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.transport.MockTransportClient;
import java.lang.reflect.Method;
@ESIntegTestCase.ClusterScope(scope= ESIntegTestCase.Scope.SUITE, numClientNodes = 0)
public class NodesStatsBasicBackwardsCompatIT extends ESBackcompatTestCase {
public void testNodeStatsSetIndices() throws Exception {
createIndex("test");
NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().execute().actionGet();
Settings settings = Settings.builder()
.put("client.transport.ignore_cluster_name", true)
.put("node.name", "transport_client_" + getTestName()).build();
// We explicitly connect to each node with a custom TransportClient
for (NodeInfo n : nodesInfo.getNodes()) {
TransportClient tc = new MockTransportClient(settings).addTransportAddress(n.getNode().getAddress());
// Just verify that the nodes stats request can be sent and serialized/deserialized between nodes with basic indices
tc.admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
tc.close();
}
}
public void testNodeStatsSetRandom() throws Exception {
createIndex("test");
NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().execute().actionGet();
Settings settings = Settings.builder()
.put("node.name", "transport_client_" + getTestName())
.put("client.transport.ignore_cluster_name", true).build();
// We explicitly connect to each node with a custom TransportClient
for (NodeInfo n : nodesInfo.getNodes()) {
TransportClient tc = new MockTransportClient(settings).addTransportAddress(n.getNode().getAddress());
// randomize the combination of flags set
// Uses reflection to find methods in an attempt to future-proof this test against newly added flags
NodesStatsRequestBuilder nsBuilder = tc.admin().cluster().prepareNodesStats();
Class c = nsBuilder.getClass();
for (Method method : c.getMethods()) {
if (method.getName().startsWith("set")) {
if (method.getParameterTypes().length == 1 && method.getParameterTypes()[0] == boolean.class) {
method.invoke(nsBuilder, randomBoolean());
}
} else if ((method.getName().equals("all") || method.getName().equals("clear")) && randomBoolean()) {
method.invoke(nsBuilder);
}
}
nsBuilder.execute().actionGet();
tc.close();
}
}
}


@ -1,78 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bwcompat;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.test.CompositeTestCluster;
import org.elasticsearch.test.ESBackcompatTestCase;
import org.elasticsearch.transport.MockTransportClient;
import java.util.concurrent.ExecutionException;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
import static org.hamcrest.CoreMatchers.equalTo;
public class TransportClientBackwardsCompatibilityIT extends ESBackcompatTestCase {
public void testSniffMode() throws ExecutionException, InterruptedException {
Settings settings = Settings.builder().put(requiredSettings()).put("client.transport.nodes_sampler_interval", "1s")
.put("name", "transport_client_sniff_mode").put(ClusterName.CLUSTER_NAME_SETTING.getKey(), cluster().getClusterName())
.put("client.transport.sniff", true).build();
CompositeTestCluster compositeTestCluster = backwardsCluster();
TransportAddress transportAddress = compositeTestCluster.externalTransportAddress();
try(TransportClient client = new MockTransportClient(settings)) {
client.addTransportAddress(transportAddress);
assertAcked(client.admin().indices().prepareCreate("test"));
int numDocs = iterations(10, 100);
IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
String id = "id" + i;
indexRequestBuilders[i] = client.prepareIndex("test", "test", id).setSource("field", "value" + i);
}
indexRandom(false, indexRequestBuilders);
String randomId = "id" + randomInt(numDocs-1);
GetResponse getResponse = client.prepareGet("test", "test", randomId).get();
assertThat(getResponse.isExists(), equalTo(true));
refresh();
SearchResponse searchResponse = client.prepareSearch("test").get();
assertThat(searchResponse.getHits().totalHits(), equalTo((long)numDocs));
int randomDocId = randomInt(numDocs-1);
String fieldValue = "value" + randomDocId;
String id = "id" + randomDocId;
searchResponse = client.prepareSearch("test").setQuery(QueryBuilders.termQuery("field", fieldValue)).get();
assertSearchHits(searchResponse, id);
}
}
}


@ -1,52 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bwcompat;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESBackcompatTestCase;
import org.elasticsearch.transport.TransportSettings;
import static org.hamcrest.Matchers.equalTo;
public class UnicastBackwardsCompatibilityIT extends ESBackcompatTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put(TransportSettings.PORT.getKey(), 9380 + nodeOrdinal)
.put("discovery.zen.ping.unicast.hosts", "localhost:9380,localhost:9381,localhost:9390,localhost:9391")
.build();
}
@Override
protected Settings externalNodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.externalNodeSettings(nodeOrdinal))
.put(TransportSettings.PORT.getKey(), 9390 + nodeOrdinal)
.put("discovery.zen.ping.unicast.hosts", "localhost:9380,localhost:9381,localhost:9390,localhost:9391")
.build();
}
public void testUnicastDiscovery() {
ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().get();
assertThat(healthResponse.getNumberOfDataNodes(), equalTo(cluster().numDataNodes()));
}
}


@ -1,113 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gateway;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.test.ESBackcompatTestCase;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.HashMap;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
@ESIntegTestCase.ClusterScope(numDataNodes = 0, scope = ESIntegTestCase.Scope.TEST, numClientNodes = 0, transportClientRatio = 0.0)
public class RecoveryBackwardsCompatibilityIT extends ESBackcompatTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put("gateway.recover_after_nodes", 2).build();
}
@Override
protected int minExternalNodes() {
return 2;
}
@Override
protected int maxExternalNodes() {
return 3;
}
public void testReusePeerRecovery() throws Exception {
assertAcked(prepareCreate("test").setSettings(Settings.builder().put(indexSettings())
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)));
logger.info("--> indexing docs");
int numDocs = scaledRandomIntBetween(100, 1000);
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
for (int i = 0; i < builders.length; i++) {
builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
}
indexRandom(true, builders);
ensureGreen();
logger.info("--> bump number of replicas from 0 to 1");
client().admin().indices().prepareFlush().execute().actionGet();
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1").build()).get();
ensureGreen();
assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());
logger.info("--> upgrade cluster");
logClusterState();
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")).execute().actionGet();
backwardsCluster().upgradeAllNodes();
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all")).execute().actionGet();
ensureGreen();
countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").setDetailed(true).get();
HashMap<String, String> map = new HashMap<>();
map.put("details", "true");
final ToXContent.Params params = new ToXContent.MapParams(map);
for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) {
final String recoverStateAsJSON = XContentHelper.toString(recoveryState, params);
if (!recoveryState.getPrimary()) {
RecoveryState.Index index = recoveryState.getIndex();
assertThat(recoverStateAsJSON, index.recoveredBytes(), equalTo(0L));
assertThat(recoverStateAsJSON, index.reusedBytes(), greaterThan(0L));
assertThat(recoverStateAsJSON, index.reusedBytes(), equalTo(index.totalBytes()));
assertThat(recoverStateAsJSON, index.recoveredFileCount(), equalTo(0));
assertThat(recoverStateAsJSON, index.reusedFileCount(), equalTo(index.totalFileCount()));
assertThat(recoverStateAsJSON, index.reusedFileCount(), greaterThan(0));
assertThat(recoverStateAsJSON, index.recoveredBytesPercent(), equalTo(100.f));
assertThat(recoverStateAsJSON, index.recoveredFilesPercent(), equalTo(100.f));
assertThat(recoverStateAsJSON, index.reusedBytes(), greaterThan(index.recoveredBytes()));
// TODO upgrade via optimize?
}
}
}
}


@ -25,7 +25,6 @@ import org.elasticsearch.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.test.ESBackcompatTestCase;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.InternalSettingsPlugin;


@ -1,35 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.test.ESBackcompatTestCase;
import org.elasticsearch.test.search.aggregations.bucket.SharedSignificantTermsTestMethods;
import java.io.IOException;
import java.util.concurrent.ExecutionException;
public class SignificantTermsBackwardCompatibilityIT extends ESBackcompatTestCase {
/**
* Test for streaming significant terms buckets to old es versions.
*/
public void testAggregateAndCheckFromSeveralShards() throws IOException, ExecutionException, InterruptedException {
SharedSignificantTermsTestMethods.aggregateAndCheckFromSeveralShards(this);
}
}


@ -1,120 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.functionscore;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder.FilterFunctionBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.test.ESBackcompatTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import static org.elasticsearch.client.Requests.searchRequest;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.gaussDecayFunction;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.weightFactorFunction;
import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
public class FunctionScoreBackwardCompatibilityIT extends ESBackcompatTestCase {
/**
* Simple upgrade test for function score.
*/
public void testSimpleFunctionScoreParsingWorks() throws IOException, ExecutionException, InterruptedException {
assertAcked(prepareCreate("test").addMapping(
"type1",
jsonBuilder().startObject()
.startObject("type1")
.startObject("properties")
.startObject("text")
.field("type", "text")
.endObject()
.startObject("loc")
.field("type", "geo_point")
.endObject()
.endObject()
.endObject()
.endObject()));
int numDocs = 10;
String[] ids = new String[numDocs];
List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
for (int i = 0; i < numDocs; i++) {
String id = Integer.toString(i);
indexBuilders.add(client().prepareIndex()
.setType("type1").setId(id).setIndex("test")
.setSource(
jsonBuilder().startObject()
.field("text", "value " + (i < 5 ? "boosted" : ""))
.startObject("loc")
.field("lat", 10 + i)
.field("lon", 20)
.endObject()
.endObject()));
ids[i] = id;
}
indexRandom(true, indexBuilders);
checkFunctionScoreStillWorks(ids);
logClusterState();
// prevent any kind of allocation during the upgrade we recover from gateway
disableAllocation("test");
boolean upgraded;
int upgradedNodesCounter = 1;
do {
logger.debug("function_score bwc: upgrading {}st node", upgradedNodesCounter++);
upgraded = backwardsCluster().upgradeOneNode();
ensureYellow();
logClusterState();
checkFunctionScoreStillWorks(ids);
} while (upgraded);
enableAllocation("test");
logger.debug("done function_score while upgrading");
}
@Override
protected Settings commonNodeSettings(int nodeOrdinal) {
return Settings.builder().put(super.commonNodeSettings(nodeOrdinal))
.put("script.inline", "true").build();
}
private void checkFunctionScoreStillWorks(String... ids) throws ExecutionException, InterruptedException, IOException {
SearchResponse response = client().search(
searchRequest().source(
searchSource().query(
functionScoreQuery(termQuery("text", "value"), new FilterFunctionBuilder[] {
new FilterFunctionBuilder(gaussDecayFunction("loc", new GeoPoint(10, 20), "1000km")),
new FilterFunctionBuilder(scriptFunction(new Script("_index['text']['value'].tf()"))),
new FilterFunctionBuilder(termQuery("text", "boosted"), weightFactorFunction(5))
}
)))).actionGet();
assertSearchResponse(response);
assertOrderedSearchHits(response, ids);
}
}


@ -1,243 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.snapshots;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStatus;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.test.ESBackcompatTestCase;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutionException;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.lessThan;
public class SnapshotBackwardsCompatibilityIT extends ESBackcompatTestCase {
public void testSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException {
logger.info("--> creating repository");
assertAcked(client().admin().cluster().preparePutRepository("test-repo")
.setType("fs").setSettings(Settings.builder()
.put("location", randomRepoPath().toAbsolutePath())
.put("compress", randomBoolean())
.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
String[] indicesBefore = new String[randomIntBetween(2,5)];
String[] indicesAfter = new String[randomIntBetween(2,5)];
for (int i = 0; i < indicesBefore.length; i++) {
indicesBefore[i] = "index_before_" + i;
createIndex(indicesBefore[i]);
}
for (int i = 0; i < indicesAfter.length; i++) {
indicesAfter[i] = "index_after_" + i;
createIndex(indicesAfter[i]);
}
String[] indices = new String[indicesBefore.length + indicesAfter.length];
System.arraycopy(indicesBefore, 0, indices, 0, indicesBefore.length);
System.arraycopy(indicesAfter, 0, indices, indicesBefore.length, indicesAfter.length);
logger.info("--> indexing some data");
IndexRequestBuilder[] buildersBefore = new IndexRequestBuilder[randomIntBetween(10, 200)];
for (int i = 0; i < buildersBefore.length; i++) {
buildersBefore[i] = client().prepareIndex(RandomPicks.randomFrom(random(), indicesBefore), "foo", Integer.toString(i)).setSource("{ \"foo\" : \"bar\" } ");
}
IndexRequestBuilder[] buildersAfter = new IndexRequestBuilder[randomIntBetween(10, 200)];
for (int i = 0; i < buildersAfter.length; i++) {
buildersAfter[i] = client().prepareIndex(RandomPicks.randomFrom(random(), indicesAfter), "bar", Integer.toString(i)).setSource("{ \"foo\" : \"bar\" } "); // spread the second batch over the "after" indices
}
indexRandom(true, buildersBefore);
indexRandom(true, buildersAfter);
assertThat(client().prepareSearch(indices).setSize(0).get().getHits().totalHits(), equalTo((long) (buildersBefore.length + buildersAfter.length)));
long[] counts = new long[indices.length];
for (int i = 0; i < indices.length; i++) {
counts[i] = client().prepareSearch(indices[i]).setSize(0).get().getHits().totalHits();
}
logger.info("--> snapshot subset of indices before upgrade");
CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("index_before_*").get();
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-1").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
logger.info("--> delete some data from indices that were already snapshotted");
int howMany = randomIntBetween(1, buildersBefore.length);
for (int i = 0; i < howMany; i++) {
IndexRequestBuilder indexRequestBuilder = RandomPicks.randomFrom(random(), buildersBefore);
IndexRequest request = indexRequestBuilder.request();
client().prepareDelete(request.index(), request.type(), request.id()).get();
}
refresh();
final long numDocs = client().prepareSearch(indices).setSize(0).get().getHits().totalHits();
assertThat(client().prepareSearch(indices).setSize(0).get().getHits().totalHits(), lessThan((long) (buildersBefore.length + buildersAfter.length)));
disableAllocation(indices);
backwardsCluster().allowOnAllNodes(indices);
logClusterState();
boolean upgraded;
do {
logClusterState();
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
upgraded = backwardsCluster().upgradeOneNode();
ensureYellow();
countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
} while (upgraded);
enableAllocation(indices);
logger.info("--> close indices");
client().admin().indices().prepareClose("index_before_*").get();
logger.info("--> verify repository");
client().admin().cluster().prepareVerifyRepository("test-repo").get();
logger.info("--> restore all indices from the snapshot");
RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).execute().actionGet();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
ensureYellow();
assertThat(client().prepareSearch(indices).setSize(0).get().getHits().totalHits(), equalTo((long) (buildersBefore.length + buildersAfter.length)));
for (int i = 0; i < indices.length; i++) {
assertThat(counts[i], equalTo(client().prepareSearch(indices[i]).setSize(0).get().getHits().totalHits()));
}
logger.info("--> snapshot subset of indices after upgrade");
createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2").setWaitForCompletion(true).setIndices("index_*").get();
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
// Test restore after index deletion
logger.info("--> delete indices");
String index = RandomPicks.randomFrom(random(), indices);
cluster().wipeIndices(index);
logger.info("--> restore one index after deletion");
restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2").setWaitForCompletion(true).setIndices(index).execute().actionGet();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
ensureYellow();
assertThat(client().prepareSearch(indices).setSize(0).get().getHits().totalHits(), equalTo((long) (buildersBefore.length + buildersAfter.length)));
for (int i = 0; i < indices.length; i++) {
assertThat(counts[i], equalTo(client().prepareSearch(indices[i]).setSize(0).get().getHits().totalHits()));
}
}
public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedException, IOException {
Client client = client();
final Path tempDir = randomRepoPath().toAbsolutePath();
logger.info("--> creating repository");
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
.setType("fs").setSettings(Settings.builder()
.put("location", tempDir)
.put("compress", randomBoolean())
.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
// only one shard
assertAcked(prepareCreate("test").setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
));
logger.info("--> indexing");
final int numDocs = randomIntBetween(10, 100);
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
for (int i = 0; i < builders.length; i++) {
builders[i] = client().prepareIndex("test", "doc", Integer.toString(i)).setSource("foo", "bar" + i);
}
indexRandom(true, builders);
flushAndRefresh();
assertNoFailures(client().admin().indices().prepareForceMerge("test").setFlush(true).setMaxNumSegments(1).get());
CreateSnapshotResponse createSnapshotResponseFirst = client.admin().cluster().prepareCreateSnapshot("test-repo", "test").setWaitForCompletion(true).setIndices("test").get();
assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseFirst.getSnapshotInfo().totalShards()));
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
{
SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test").get().getSnapshots().get(0);
List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
for (SnapshotIndexShardStatus status : shards) {
assertThat(status.getStats().getProcessedFiles(), greaterThan(1));
}
}
if (frequently()) {
logger.info("--> upgrade");
disableAllocation("test");
backwardsCluster().allowOnAllNodes("test");
logClusterState();
boolean upgraded;
do {
logClusterState();
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
upgraded = backwardsCluster().upgradeOneNode();
ensureYellow();
countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
} while (upgraded);
enableAllocation("test");
}
if (cluster().numDataNodes() > 1 && randomBoolean()) { // only bump the replicas if we have enough nodes
logger.info("--> move from 0 to 1 replica");
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get();
}
logger.debug("---> repo exists: {} files: {}", Files.exists(tempDir.resolve("indices/test/0")), Arrays.toString(FileSystemUtils.files(tempDir.resolve("indices/test/0")))); // it's only one shard!
CreateSnapshotResponse createSnapshotResponseSecond = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-1").setWaitForCompletion(true).setIndices("test").get();
assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseSecond.getSnapshotInfo().totalShards()));
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-1").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
{
SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-1").get().getSnapshots().get(0);
List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
for (SnapshotIndexShardStatus status : shards) {
assertThat(status.getStats().getProcessedFiles(), equalTo(1)); // we flush before the snapshot such that we have to process the segments_N files
}
}
client().prepareDelete("test", "doc", "1").get();
CreateSnapshotResponse createSnapshotResponseThird = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-2").setWaitForCompletion(true).setIndices("test").get();
assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseThird.getSnapshotInfo().totalShards()));
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-2").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
{
SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-2").get().getSnapshots().get(0);
List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
for (SnapshotIndexShardStatus status : shards) {
assertThat(status.getStats().getProcessedFiles(), equalTo(2)); // we flush before the snapshot such that we have to process the segments_N files plus the .del file
}
}
}
}


@ -30,11 +30,6 @@ import java.util.Collection;
 public abstract class ESNetty4IntegTestCase extends ESIntegTestCase {
-    @Override
-    protected boolean ignoreExternalCluster() {
-        return true;
-    }
     @Override
     protected boolean addMockTransportService() {
         return false;


@ -88,11 +88,6 @@ public class ContextAndHeaderTransportIT extends HttpSmokeTestCase {
         .build();
     }
-    @Override
-    protected boolean ignoreExternalCluster() {
-        return true;
-    }
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         ArrayList<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins());


@ -53,11 +53,6 @@ public class CorsRegexIT extends HttpSmokeTestCase {
         .build();
     }
-    @Override
-    protected boolean ignoreExternalCluster() {
-        return true;
-    }
     public void testThatRegularExpressionWorksOnMatch() throws IOException {
         String corsValue = "http://localhost:9200";
         Response response = getRestClient().performRequest("GET", "/",


@ -48,11 +48,6 @@ public class DetailedErrorsDisabledIT extends HttpSmokeTestCase {
         .build();
     }
-    @Override
-    protected boolean ignoreExternalCluster() {
-        return true;
-    }
     public void testThatErrorTraceParamReturns400() throws IOException {
         try {
             getRestClient().performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true"));


@ -24,12 +24,12 @@ import org.apache.http.entity.StringEntity;
 import org.apache.http.message.BasicHeader;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestClient;
-import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.rest.ESRestTestCase;
 import java.io.IOException;
 import java.util.Collections;
-public class HttpCompressionIT extends ESIntegTestCase {
+public class HttpCompressionIT extends ESRestTestCase {
     private static final String GZIP_ENCODING = "gzip";
     private static final StringEntity SAMPLE_DOCUMENT = new StringEntity("{\n" +
@ -39,23 +39,16 @@ public class HttpCompressionIT extends ESIntegTestCase {
         " }\n" +
         "}", ContentType.APPLICATION_JSON);
-    @Override
-    protected boolean ignoreExternalCluster() {
-        return false;
-    }
     public void testCompressesResponseIfRequested() throws IOException {
-        ensureGreen();
-        try (RestClient client = getRestClient()) {
+        RestClient client = client();
         Response response = client.performRequest("GET", "/", new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING));
         assertEquals(200, response.getStatusLine().getStatusCode());
         assertEquals(GZIP_ENCODING, response.getHeader(HttpHeaders.CONTENT_ENCODING));
-        }
     }
     public void testUncompressedResponseByDefault() throws IOException {
-        ensureGreen();
-        try (RestClient client = getRestClient()) {
+        RestClient client = client();
         Response response = client.performRequest("GET", "/");
         assertEquals(200, response.getStatusLine().getStatusCode());
         assertNull(response.getHeader(HttpHeaders.CONTENT_ENCODING));
@ -64,6 +57,5 @@ public class HttpCompressionIT extends ESIntegTestCase {
         assertEquals(201, response.getStatusLine().getStatusCode());
         assertNull(response.getHeader(HttpHeaders.CONTENT_ENCODING));
-        }
     }
 }


@ -78,10 +78,4 @@ public abstract class HttpSmokeTestCase extends ESIntegTestCase {
         .put(NetworkModule.TRANSPORT_TYPE_KEY, clientTypeKey)
         .build();
     }
-    @Override
-    protected boolean ignoreExternalCluster() {
-        return true;
-    }
 }


@ -45,11 +45,6 @@ public class ResponseHeaderPluginIT extends HttpSmokeTestCase {
         .build();
     }
-    @Override
-    protected boolean ignoreExternalCluster() {
-        return true;
-    }
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         ArrayList<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins());


@ -1,272 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test;
import com.carrotsearch.randomizedtesting.annotations.TestGroup;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.discovery.TestZenDiscovery;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.junit.listeners.LoggingListener;
import java.io.IOException;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import static org.hamcrest.Matchers.is;
/**
* Abstract base class for backwards compatibility tests. Subclasses of this class
* can run tests against a mixed version cluster. A subset of the nodes in the cluster
 * are started in dedicated processes running off a full-fledged elasticsearch release.
 * Nodes can be "upgraded" from a "backwards" node to a "new" node, where the "new" node's
 * version corresponds to the current version.
 * The purpose of this test class is to run tests in scenarios where the cluster is in an
 * intermediate state during a rolling upgrade, as well as in full upgrade situations. The
 * clients accessed via #client() are random clients to the nodes in the cluster and might
 * execute requests on "new" as well as "old" nodes.
* <p>
* Note: this base class is still experimental and might have bugs or leave external processes running behind.
* </p>
* Backwards compatibility tests are disabled by default via {@link Backwards} annotation.
* The following system variables control the test execution:
* <ul>
* <li>
* <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY}</tt> enables / disables
* tests annotated with {@link Backwards} (defaults to
* <tt>false</tt>)
* </li>
* <li>
* <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY_VERSION}</tt>
 * sets the version to run the external nodes from, formatted as <i>X.Y.Z</i>.
 * The test class will try to locate a release folder <i>elasticsearch-X.Y.Z</i>
 * within the path passed via {@value #TESTS_BACKWARDS_COMPATIBILITY_PATH},
 * depending on this system variable.
* </li>
* <li>
* <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}</tt> the path to the
* elasticsearch releases to run backwards compatibility tests against.
* </li>
* </ul>
*
*/
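// A hedged invocation sketch (all values hypothetical, not taken from this
// commit): a run of these suites was driven by the system properties listed
// in the javadoc above, roughly along the lines of
//
//   gradle test -Dtests.bwc=true \
//       -Dtests.bwc.version=2.4.1 \
//       -Dtests.bwc.path=/path/to/releases
//
// where /path/to/releases is assumed to contain an unpacked
// elasticsearch-2.4.1 release directory.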
// the transportClientRatio is tricky here since we don't fully control the cluster nodes
@ESBackcompatTestCase.Backwards
@ESIntegTestCase.ClusterScope(minNumDataNodes = 0, maxNumDataNodes = 2, scope = ESIntegTestCase.Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0)
public abstract class ESBackcompatTestCase extends ESIntegTestCase {
/**
 * Key used to enable backwards compatibility tests
 * via the commandline -D{@value #TESTS_BACKWARDS_COMPATIBILITY}
*/
public static final String TESTS_BACKWARDS_COMPATIBILITY = "tests.bwc";
public static final String TESTS_BACKWARDS_COMPATIBILITY_VERSION = "tests.bwc.version";
/**
 * Key used to set the path to the elasticsearch releases used to run backwards compatibility tests,
 * via the commandline -D{@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}
*/
public static final String TESTS_BACKWARDS_COMPATIBILITY_PATH = "tests.bwc.path";
/**
 * Property that allows adapting the tests' behaviour to older features/bugs based on the input version
*/
private static final String TESTS_COMPATIBILITY = "tests.compatibility";
private static final Version GLOBAL_COMPATIBILITY_VERSION = Version.fromString(compatibilityVersionProperty());
private static Path backwardsCompatibilityPath() {
String path = System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_PATH);
if (path == null || path.isEmpty()) {
throw new IllegalArgumentException("Must specify backwards test path with property " + TESTS_BACKWARDS_COMPATIBILITY_PATH);
}
String version = System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_VERSION);
if (version == null || version.isEmpty()) {
throw new IllegalArgumentException("Must specify backwards test version with property " + TESTS_BACKWARDS_COMPATIBILITY_VERSION);
}
if (Version.fromString(version).before(Version.CURRENT.minimumCompatibilityVersion())) {
throw new IllegalArgumentException("Backcompat elasticsearch version must be same major version as current. " +
"backcompat: " + version + ", current: " + Version.CURRENT.toString());
}
Path file = PathUtils.get(path, "elasticsearch-" + version);
if (!Files.exists(file)) {
throw new IllegalArgumentException("Backwards tests location is missing: " + file.toAbsolutePath());
}
if (!Files.isDirectory(file)) {
throw new IllegalArgumentException("Backwards tests location is not a directory: " + file.toAbsolutePath());
}
return file;
}
/**
 * Returns the tests compatibility version.
*/
public Version compatibilityVersion() {
return compatibilityVersion(getClass());
}
private Version compatibilityVersion(Class<?> clazz) {
if (clazz == Object.class || clazz == ESIntegTestCase.class) {
return globalCompatibilityVersion();
}
CompatibilityVersion annotation = clazz.getAnnotation(CompatibilityVersion.class);
if (annotation != null) {
return Version.min(Version.fromId(annotation.version()), compatibilityVersion(clazz.getSuperclass()));
}
return compatibilityVersion(clazz.getSuperclass());
}
/**
* Returns a global compatibility version that is set via the
* {@value #TESTS_COMPATIBILITY} or {@value #TESTS_BACKWARDS_COMPATIBILITY_VERSION} system property.
* If both are unset the current version is used as the global compatibility version. This
* compatibility version is used for static randomization. For per-suite compatibility version see
* {@link #compatibilityVersion()}
*/
public static Version globalCompatibilityVersion() {
return GLOBAL_COMPATIBILITY_VERSION;
}
private static String compatibilityVersionProperty() {
final String version = System.getProperty(TESTS_COMPATIBILITY);
if (Strings.hasLength(version)) {
return version;
}
return System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_VERSION);
}
public CompositeTestCluster backwardsCluster() {
return (CompositeTestCluster) cluster();
}
@Override
protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException {
TestCluster cluster = super.buildTestCluster(scope, seed);
ExternalNode externalNode = new ExternalNode(backwardsCompatibilityPath(), randomLong(), new NodeConfigurationSource() {
@Override
public Settings nodeSettings(int nodeOrdinal) {
return externalNodeSettings(nodeOrdinal);
}
@Override
public Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.emptyList();
}
@Override
public Settings transportClientSettings() {
return ESBackcompatTestCase.this.transportClientSettings(); // delegate to the enclosing test case instead of recursing
}
});
return new CompositeTestCluster((InternalTestCluster) cluster, between(minExternalNodes(), maxExternalNodes()), externalNode);
}
private Settings addLoggerSettings(Settings externalNodesSettings) {
TestLogging logging = getClass().getAnnotation(TestLogging.class);
Map<String, String> loggingLevels = LoggingListener.getLoggersAndLevelsFromAnnotation(logging);
Settings.Builder finalSettings = Settings.builder();
if (loggingLevels != null) {
for (Map.Entry<String, String> level : loggingLevels.entrySet()) {
finalSettings.put("logger." + level.getKey(), level.getValue());
}
}
finalSettings.put(externalNodesSettings);
return finalSettings.build();
}
protected int minExternalNodes() { return 1; }
protected int maxExternalNodes() {
return 2;
}
@Override
protected int maximumNumberOfReplicas() {
return 1;
}
protected Settings requiredSettings() {
return ExternalNode.REQUIRED_SETTINGS;
}
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return commonNodeSettings(nodeOrdinal);
}
public void assertAllShardsOnNodes(String index, String pattern) {
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndexName())) {
String name = clusterState.nodes().get(shardRouting.currentNodeId()).getName();
assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
}
}
}
}
}
protected Settings commonNodeSettings(int nodeOrdinal) {
Settings.Builder builder = Settings.builder().put(requiredSettings());
builder.put(NetworkModule.TRANSPORT_TYPE_KEY, randomBoolean() ? "netty3" : "netty4"); // run same transport / disco as external
builder.put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false);
return builder.build();
}
protected Settings externalNodeSettings(int nodeOrdinal) {
return addLoggerSettings(commonNodeSettings(nodeOrdinal));
}
/**
* Annotation for backwards compat tests
*/
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@TestGroup(enabled = false, sysProperty = ESBackcompatTestCase.TESTS_BACKWARDS_COMPATIBILITY)
public @interface Backwards {
}
/**
* If a test is annotated with {@link CompatibilityVersion}
* all randomized settings will only contain settings or mappings which are compatible with the specified version ID.
*/
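// A hypothetical usage sketch (the suite name and version id are illustrative,
// not taken from this commit): pinning a suite's randomized settings to the
// internal id of version 2.0.0 would have looked roughly like
//
//   @CompatibilityVersion(version = 2000099)
//   public class MyBwcIT extends ESBackcompatTestCase { ... }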
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE})
public @interface CompatibilityVersion {
int version();
}
}


@ -267,11 +267,6 @@ public abstract class ESIntegTestCase extends ESTestCase {
      */
     public static final String TESTS_CLIENT_RATIO = "tests.client.ratio";
-    /**
-     * Key used to eventually switch to using an external cluster and provide its transport addresses
-     */
-    public static final String TESTS_CLUSTER = "tests.cluster";
     /**
      * Key used to retrieve the index random seed from the index settings on a running node.
      * The value of this seed can be used to initialize a random context for a specific index.
@ -1718,35 +1713,11 @@
         return Settings.EMPTY;
     }
-    private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException {
-        String[] stringAddresses = clusterAddresses.split(",");
-        TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length];
-        int i = 0;
-        for (String stringAddress : stringAddresses) {
-            URL url = new URL("http://" + stringAddress);
-            InetAddress inetAddress = InetAddress.getByName(url.getHost());
-            transportAddresses[i++] = new TransportAddress(new InetSocketAddress(inetAddress, url.getPort()));
-        }
-        return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses);
-    }
     protected Settings externalClusterClientSettings() {
         return Settings.EMPTY;
     }
-    protected boolean ignoreExternalCluster() {
-        return false;
-    }
     protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException {
-        String clusterAddresses = System.getProperty(TESTS_CLUSTER);
-        if (Strings.hasLength(clusterAddresses) && ignoreExternalCluster() == false) {
-            if (scope == Scope.TEST) {
-                throw new IllegalArgumentException("Cannot run TEST scope test with " + TESTS_CLUSTER);
-            }
-            return buildExternalCluster(clusterAddresses);
-        }
         final String nodePrefix;
         switch (scope) {
             case TEST:


@ -1,181 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.transport.MockTcpTransportPlugin;
import org.elasticsearch.transport.MockTransportClient;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicInteger;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertThat;
/**
* External cluster to run the tests against.
 * It is a pure immutable test cluster that allows sending requests to a pre-existing cluster
* and supports by nature all the needed test operations like wipeIndices etc.
*/
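// A minimal construction sketch, mirroring the buildExternalCluster helper
// removed from ESIntegTestCase above (the host and port are assumptions):
//
//   TransportAddress address = new TransportAddress(
//           new InetSocketAddress(InetAddress.getByName("localhost"), 9300));
//   TestCluster cluster = new ExternalTestCluster(createTempDir(),
//           Settings.EMPTY, Collections.emptyList(), address);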
public final class ExternalTestCluster extends TestCluster {
private static final Logger logger = Loggers.getLogger(ExternalTestCluster.class);
private static final AtomicInteger counter = new AtomicInteger();
public static final String EXTERNAL_CLUSTER_PREFIX = "external_";
private final Client client;
private final InetSocketAddress[] httpAddresses;
private final String clusterName;
private final int numDataNodes;
private final int numMasterAndDataNodes;
public ExternalTestCluster(Path tempDir, Settings additionalSettings, Collection<Class<? extends Plugin>> pluginClasses, TransportAddress... transportAddresses) {
super(0);
Settings.Builder clientSettingsBuilder = Settings.builder()
.put(additionalSettings)
.put("node.name", InternalTestCluster.TRANSPORT_CLIENT_PREFIX + EXTERNAL_CLUSTER_PREFIX + counter.getAndIncrement())
.put("client.transport.ignore_cluster_name", true)
.put(Environment.PATH_HOME_SETTING.getKey(), tempDir);
boolean addMockTcpTransport = additionalSettings.get(NetworkModule.TRANSPORT_TYPE_KEY) == null;
if (addMockTcpTransport) {
clientSettingsBuilder.put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME);
if (pluginClasses.contains(MockTcpTransportPlugin.class) == false) {
pluginClasses = new ArrayList<>(pluginClasses);
pluginClasses.add(MockTcpTransportPlugin.class);
}
}
Settings clientSettings = clientSettingsBuilder.build();
TransportClient client = new MockTransportClient(clientSettings, pluginClasses);
try {
client.addTransportAddresses(transportAddresses);
NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().clear().setSettings(true).setHttp(true).get();
httpAddresses = new InetSocketAddress[nodeInfos.getNodes().size()];
this.clusterName = nodeInfos.getClusterName().value();
int dataNodes = 0;
int masterAndDataNodes = 0;
for (int i = 0; i < nodeInfos.getNodes().size(); i++) {
NodeInfo nodeInfo = nodeInfos.getNodes().get(i);
httpAddresses[i] = nodeInfo.getHttp().address().publishAddress().address();
if (DiscoveryNode.isDataNode(nodeInfo.getSettings())) {
dataNodes++;
masterAndDataNodes++;
} else if (DiscoveryNode.isMasterNode(nodeInfo.getSettings())) {
masterAndDataNodes++;
}
}
this.numDataNodes = dataNodes;
this.numMasterAndDataNodes = masterAndDataNodes;
this.client = client;
logger.info("Setup ExternalTestCluster [{}] made of [{}] nodes", nodeInfos.getClusterName().value(), size());
} catch (Exception e) {
client.close();
throw e;
}
}
@Override
public void afterTest() {
}
@Override
public Client client() {
return client;
}
@Override
public int size() {
return httpAddresses.length;
}
@Override
public int numDataNodes() {
return numDataNodes;
}
@Override
public int numDataAndMasterNodes() {
return numMasterAndDataNodes;
}
@Override
public InetSocketAddress[] httpAddresses() {
return httpAddresses;
}
@Override
public void close() throws IOException {
client.close();
}
@Override
public void ensureEstimatedStats() {
if (size() > 0) {
NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats()
.clear().setBreaker(true).setIndices(true).execute().actionGet();
for (NodeStats stats : nodeStats.getNodes()) {
assertThat("Fielddata breaker not reset to 0 on node: " + stats.getNode(),
stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
// ExternalTestCluster does not check the request breaker,
// because checking it requires a network request, which in
// turn increments the breaker, making it non-0
assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0L));
assertThat("Query cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0L));
assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0L));
}
}
}
@Override
public Iterable<Client> getClients() {
return Collections.singleton(client);
}
@Override
public String getClusterName() {
return clusterName;
}
}


@ -36,7 +36,6 @@ import java.util.TimeZone;
 import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_ITERATIONS;
 import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_PREFIX;
 import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_TESTMETHOD;
-import static org.elasticsearch.test.ESIntegTestCase.TESTS_CLUSTER;
 import static org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.REST_TESTS_BLACKLIST;
 import static org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.REST_TESTS_SPEC;
 import static org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.REST_TESTS_SUITE;
@ -140,7 +139,7 @@ public class ReproduceInfoPrinter extends RunListener {
         appendProperties("tests.es.logger.level");
         if (inVerifyPhase()) {
             // these properties only make sense for integration tests
-            appendProperties(TESTS_CLUSTER, ESIntegTestCase.TESTS_ENABLE_MOCK_MODULES);
+            appendProperties(ESIntegTestCase.TESTS_ENABLE_MOCK_MODULES);
         }
         appendProperties("tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms",
             "tests.client.ratio", "tests.heap.size", "tests.bwc", "tests.bwc.version");