Cleanup test framework in order to release it as a jar file
This commit adds javadocs and removes unused methods from central classes like ElasticsearchIntegrationTest. It also changes the visibility of many methods and classes that are only needed inside the test infrastructure.
This commit is contained in:
parent
2afdb4c8e7
commit
16ee742682
1
pom.xml
1
pom.xml
|
@ -987,6 +987,7 @@
|
||||||
<configuration>
|
<configuration>
|
||||||
<includes>
|
<includes>
|
||||||
<include>org/elasticsearch/test/**/*</include>
|
<include>org/elasticsearch/test/**/*</include>
|
||||||
|
<include>org/apache/lucene/util/AbstractRandomizedTest.class</include>
|
||||||
</includes>
|
</includes>
|
||||||
</configuration>
|
</configuration>
|
||||||
</execution>
|
</execution>
|
||||||
|
|
|
@ -180,7 +180,7 @@ public class AckTests extends ElasticsearchIntegrationTest {
|
||||||
public void testClusterRerouteAcknowledgement() throws InterruptedException {
|
public void testClusterRerouteAcknowledgement() throws InterruptedException {
|
||||||
client().admin().indices().prepareCreate("test")
|
client().admin().indices().prepareCreate("test")
|
||||||
.setSettings(settingsBuilder()
|
.setSettings(settingsBuilder()
|
||||||
.put("number_of_shards", atLeast(cluster().numNodes()))
|
.put("number_of_shards", atLeast(cluster().size()))
|
||||||
.put("number_of_replicas", 0)).get();
|
.put("number_of_replicas", 0)).get();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
|
||||||
|
@ -220,7 +220,7 @@ public class AckTests extends ElasticsearchIntegrationTest {
|
||||||
public void testClusterRerouteNoAcknowledgement() throws InterruptedException {
|
public void testClusterRerouteNoAcknowledgement() throws InterruptedException {
|
||||||
client().admin().indices().prepareCreate("test")
|
client().admin().indices().prepareCreate("test")
|
||||||
.setSettings(settingsBuilder()
|
.setSettings(settingsBuilder()
|
||||||
.put("number_of_shards", atLeast(cluster().numNodes()))
|
.put("number_of_shards", atLeast(cluster().size()))
|
||||||
.put("number_of_replicas", 0)).get();
|
.put("number_of_replicas", 0)).get();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
|
||||||
|
@ -234,7 +234,7 @@ public class AckTests extends ElasticsearchIntegrationTest {
|
||||||
public void testClusterRerouteAcknowledgementDryRun() throws InterruptedException {
|
public void testClusterRerouteAcknowledgementDryRun() throws InterruptedException {
|
||||||
client().admin().indices().prepareCreate("test")
|
client().admin().indices().prepareCreate("test")
|
||||||
.setSettings(settingsBuilder()
|
.setSettings(settingsBuilder()
|
||||||
.put("number_of_shards", atLeast(cluster().numNodes()))
|
.put("number_of_shards", atLeast(cluster().size()))
|
||||||
.put("number_of_replicas", 0)).get();
|
.put("number_of_replicas", 0)).get();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
|
||||||
|
@ -270,7 +270,7 @@ public class AckTests extends ElasticsearchIntegrationTest {
|
||||||
public void testClusterRerouteNoAcknowledgementDryRun() throws InterruptedException {
|
public void testClusterRerouteNoAcknowledgementDryRun() throws InterruptedException {
|
||||||
client().admin().indices().prepareCreate("test")
|
client().admin().indices().prepareCreate("test")
|
||||||
.setSettings(settingsBuilder()
|
.setSettings(settingsBuilder()
|
||||||
.put("number_of_shards", atLeast(cluster().numNodes()))
|
.put("number_of_shards", atLeast(cluster().size()))
|
||||||
.put("number_of_replicas", 0)).get();
|
.put("number_of_replicas", 0)).get();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
|
||||||
|
@ -313,7 +313,7 @@ public class AckTests extends ElasticsearchIntegrationTest {
|
||||||
public void testClusterUpdateSettingsAcknowledgement() {
|
public void testClusterUpdateSettingsAcknowledgement() {
|
||||||
client().admin().indices().prepareCreate("test")
|
client().admin().indices().prepareCreate("test")
|
||||||
.setSettings(settingsBuilder()
|
.setSettings(settingsBuilder()
|
||||||
.put("number_of_shards", atLeast(cluster().numNodes()))
|
.put("number_of_shards", atLeast(cluster().size()))
|
||||||
.put("number_of_replicas", 0)).get();
|
.put("number_of_replicas", 0)).get();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
|
||||||
|
@ -358,7 +358,7 @@ public class AckTests extends ElasticsearchIntegrationTest {
|
||||||
public void testClusterUpdateSettingsNoAcknowledgement() {
|
public void testClusterUpdateSettingsNoAcknowledgement() {
|
||||||
client().admin().indices().prepareCreate("test")
|
client().admin().indices().prepareCreate("test")
|
||||||
.setSettings(settingsBuilder()
|
.setSettings(settingsBuilder()
|
||||||
.put("number_of_shards", atLeast(cluster().numNodes()))
|
.put("number_of_shards", atLeast(cluster().size()))
|
||||||
.put("number_of_replicas", 0)).get();
|
.put("number_of_replicas", 0)).get();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
|
||||||
|
|
|
@ -126,7 +126,7 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest {
|
||||||
logger.info("--> starting 2 nodes");
|
logger.info("--> starting 2 nodes");
|
||||||
String node_1 = cluster().startNode(commonSettings);
|
String node_1 = cluster().startNode(commonSettings);
|
||||||
cluster().startNode(commonSettings);
|
cluster().startNode(commonSettings);
|
||||||
assertThat(cluster().numNodes(), equalTo(2));
|
assertThat(cluster().size(), equalTo(2));
|
||||||
ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
|
ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
|
||||||
assertThat(healthResponse.isTimedOut(), equalTo(false));
|
assertThat(healthResponse.isTimedOut(), equalTo(false));
|
||||||
|
|
||||||
|
@ -170,7 +170,7 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest {
|
||||||
// wait a bit for the cluster to realize that the shard is not there...
|
// wait a bit for the cluster to realize that the shard is not there...
|
||||||
// TODO can we get around this? the cluster is RED, so what do we wait for?
|
// TODO can we get around this? the cluster is RED, so what do we wait for?
|
||||||
client().admin().cluster().prepareReroute().get();
|
client().admin().cluster().prepareReroute().get();
|
||||||
assertThat(cluster().client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet().getStatus(), equalTo(ClusterHealthStatus.RED));
|
assertThat(client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet().getStatus(), equalTo(ClusterHealthStatus.RED));
|
||||||
logger.info("--> explicitly allocate primary");
|
logger.info("--> explicitly allocate primary");
|
||||||
state = client().admin().cluster().prepareReroute()
|
state = client().admin().cluster().prepareReroute()
|
||||||
.add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
|
.add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
|
||||||
|
|
|
@ -45,7 +45,7 @@ public class FilteringAllocationTests extends ElasticsearchIntegrationTest {
|
||||||
logger.info("--> starting 2 nodes");
|
logger.info("--> starting 2 nodes");
|
||||||
final String node_0 = cluster().startNode();
|
final String node_0 = cluster().startNode();
|
||||||
final String node_1 = cluster().startNode();
|
final String node_1 = cluster().startNode();
|
||||||
assertThat(cluster().numNodes(), equalTo(2));
|
assertThat(cluster().size(), equalTo(2));
|
||||||
|
|
||||||
logger.info("--> creating an index with no replicas");
|
logger.info("--> creating an index with no replicas");
|
||||||
client().admin().indices().prepareCreate("test")
|
client().admin().indices().prepareCreate("test")
|
||||||
|
@ -84,7 +84,7 @@ public class FilteringAllocationTests extends ElasticsearchIntegrationTest {
|
||||||
logger.info("--> starting 2 nodes");
|
logger.info("--> starting 2 nodes");
|
||||||
final String node_0 = cluster().startNode();
|
final String node_0 = cluster().startNode();
|
||||||
final String node_1 = cluster().startNode();
|
final String node_1 = cluster().startNode();
|
||||||
assertThat(cluster().numNodes(), equalTo(2));
|
assertThat(cluster().size(), equalTo(2));
|
||||||
|
|
||||||
logger.info("--> creating an index with no replicas");
|
logger.info("--> creating an index with no replicas");
|
||||||
client().admin().indices().prepareCreate("test")
|
client().admin().indices().prepareCreate("test")
|
||||||
|
|
|
@ -58,7 +58,7 @@ public class ShardsAllocatorModuleTests extends ElasticsearchIntegrationTest {
|
||||||
}
|
}
|
||||||
|
|
||||||
private void assertAllocatorInstance(Settings settings, Class<? extends ShardsAllocator> clazz) {
|
private void assertAllocatorInstance(Settings settings, Class<? extends ShardsAllocator> clazz) {
|
||||||
while (cluster().numNodes() != 0) {
|
while (cluster().size() != 0) {
|
||||||
cluster().stopRandomNode();
|
cluster().stopRandomNode();
|
||||||
}
|
}
|
||||||
cluster().startNode(settings);
|
cluster().startNode(settings);
|
||||||
|
|
|
@ -113,10 +113,10 @@ public class BulkTests extends ElasticsearchIntegrationTest {
|
||||||
public void testBulkVersioning() throws Exception {
|
public void testBulkVersioning() throws Exception {
|
||||||
createIndex("test");
|
createIndex("test");
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
BulkResponse bulkResponse = run(client().prepareBulk()
|
BulkResponse bulkResponse = client().prepareBulk()
|
||||||
.add(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field", "1"))
|
.add(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field", "1"))
|
||||||
.add(client().prepareIndex("test", "type", "2").setCreate(true).setSource("field", "1"))
|
.add(client().prepareIndex("test", "type", "2").setCreate(true).setSource("field", "1"))
|
||||||
.add(client().prepareIndex("test", "type", "1").setSource("field", "2")));
|
.add(client().prepareIndex("test", "type", "1").setSource("field", "2")).get();
|
||||||
|
|
||||||
assertTrue(((IndexResponse) bulkResponse.getItems()[0].getResponse()).isCreated());
|
assertTrue(((IndexResponse) bulkResponse.getItems()[0].getResponse()).isCreated());
|
||||||
assertThat(((IndexResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(1l));
|
assertThat(((IndexResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(1l));
|
||||||
|
@ -125,19 +125,19 @@ public class BulkTests extends ElasticsearchIntegrationTest {
|
||||||
assertFalse(((IndexResponse) bulkResponse.getItems()[2].getResponse()).isCreated());
|
assertFalse(((IndexResponse) bulkResponse.getItems()[2].getResponse()).isCreated());
|
||||||
assertThat(((IndexResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(2l));
|
assertThat(((IndexResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(2l));
|
||||||
|
|
||||||
bulkResponse = run(client().prepareBulk()
|
bulkResponse = client().prepareBulk()
|
||||||
.add(client().prepareUpdate("test", "type", "1").setVersion(4l).setDoc("field", "2"))
|
.add(client().prepareUpdate("test", "type", "1").setVersion(4l).setDoc("field", "2"))
|
||||||
.add(client().prepareUpdate("test", "type", "2").setDoc("field", "2"))
|
.add(client().prepareUpdate("test", "type", "2").setDoc("field", "2"))
|
||||||
.add(client().prepareUpdate("test", "type", "1").setVersion(2l).setDoc("field", "3")));
|
.add(client().prepareUpdate("test", "type", "1").setVersion(2l).setDoc("field", "3")).get();
|
||||||
|
|
||||||
assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version"));
|
assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version"));
|
||||||
assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
|
assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
|
||||||
assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l));
|
assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l));
|
||||||
|
|
||||||
bulkResponse = run(client().prepareBulk()
|
bulkResponse = client().prepareBulk()
|
||||||
.add(client().prepareIndex("test", "type", "e1").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
|
.add(client().prepareIndex("test", "type", "e1").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
|
||||||
.add(client().prepareIndex("test", "type", "e2").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
|
.add(client().prepareIndex("test", "type", "e2").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
|
||||||
.add(client().prepareIndex("test", "type", "e1").setSource("field", "2").setVersion(12).setVersionType(VersionType.EXTERNAL)));
|
.add(client().prepareIndex("test", "type", "e1").setSource("field", "2").setVersion(12).setVersionType(VersionType.EXTERNAL)).get();
|
||||||
|
|
||||||
assertTrue(((IndexResponse) bulkResponse.getItems()[0].getResponse()).isCreated());
|
assertTrue(((IndexResponse) bulkResponse.getItems()[0].getResponse()).isCreated());
|
||||||
assertThat(((IndexResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(10l));
|
assertThat(((IndexResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(10l));
|
||||||
|
@ -146,10 +146,10 @@ public class BulkTests extends ElasticsearchIntegrationTest {
|
||||||
assertFalse(((IndexResponse) bulkResponse.getItems()[2].getResponse()).isCreated());
|
assertFalse(((IndexResponse) bulkResponse.getItems()[2].getResponse()).isCreated());
|
||||||
assertThat(((IndexResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(12l));
|
assertThat(((IndexResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(12l));
|
||||||
|
|
||||||
bulkResponse = run(client().prepareBulk()
|
bulkResponse = client().prepareBulk()
|
||||||
.add(client().prepareUpdate("test", "type", "e1").setVersion(4l).setDoc("field", "2").setVersion(10).setVersionType(VersionType.EXTERNAL))
|
.add(client().prepareUpdate("test", "type", "e1").setVersion(4l).setDoc("field", "2").setVersion(10).setVersionType(VersionType.EXTERNAL))
|
||||||
.add(client().prepareUpdate("test", "type", "e2").setDoc("field", "2").setVersion(15).setVersionType(VersionType.EXTERNAL))
|
.add(client().prepareUpdate("test", "type", "e2").setDoc("field", "2").setVersion(15).setVersionType(VersionType.EXTERNAL))
|
||||||
.add(client().prepareUpdate("test", "type", "e1").setVersion(2l).setDoc("field", "3").setVersion(15).setVersionType(VersionType.EXTERNAL)));
|
.add(client().prepareUpdate("test", "type", "e1").setVersion(2l).setDoc("field", "3").setVersion(15).setVersionType(VersionType.EXTERNAL)).get();
|
||||||
|
|
||||||
assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version"));
|
assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version"));
|
||||||
assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(15l));
|
assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(15l));
|
||||||
|
|
|
@ -54,7 +54,7 @@ import static org.hamcrest.Matchers.nullValue;
|
||||||
public class DocumentActionsTests extends ElasticsearchIntegrationTest {
|
public class DocumentActionsTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
protected void createIndex() {
|
protected void createIndex() {
|
||||||
wipeIndex(getConcreteIndexName());
|
wipeIndices(getConcreteIndexName());
|
||||||
createIndex(getConcreteIndexName());
|
createIndex(getConcreteIndexName());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -56,7 +56,7 @@ public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
public Client startNode(Settings.Builder settings) {
|
public Client startNode(Settings.Builder settings) {
|
||||||
String name = cluster().startNode(settings);
|
String name = cluster().startNode(settings);
|
||||||
return cluster().clientNodeClient(name);
|
return cluster().client(name);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
|
|
@ -23,6 +23,7 @@ import com.google.common.collect.Maps;
|
||||||
import org.apache.lucene.analysis.Analyzer;
|
import org.apache.lucene.analysis.Analyzer;
|
||||||
import org.elasticsearch.Version;
|
import org.elasticsearch.Version;
|
||||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||||
|
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||||
import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
|
import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
|
||||||
|
@ -72,7 +73,7 @@ public class PreBuiltAnalyzerIntegrationTests extends ElasticsearchIntegrationTe
|
||||||
.endObject()
|
.endObject()
|
||||||
.endObject();
|
.endObject();
|
||||||
|
|
||||||
Settings versionSettings = randomSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build();
|
Settings versionSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build();
|
||||||
client().admin().indices().prepareCreate(indexName).addMapping("type", mapping).setSettings(versionSettings).get();
|
client().admin().indices().prepareCreate(indexName).addMapping("type", mapping).setSettings(versionSettings).get();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -52,7 +52,7 @@ public class ConcurrentDynamicTemplateTests extends ElasticsearchIntegrationTest
|
||||||
|
|
||||||
int iters = atLeast(5);
|
int iters = atLeast(5);
|
||||||
for (int i = 0; i < iters; i++) {
|
for (int i = 0; i < iters; i++) {
|
||||||
wipeIndex("test");
|
wipeIndices("test");
|
||||||
client().admin().indices().prepareCreate("test")
|
client().admin().indices().prepareCreate("test")
|
||||||
.setSettings(
|
.setSettings(
|
||||||
ImmutableSettings.settingsBuilder()
|
ImmutableSettings.settingsBuilder()
|
||||||
|
|
|
@ -285,7 +285,7 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest {
|
||||||
@Test
|
@Test
|
||||||
public void updateMappingConcurrently() throws Throwable {
|
public void updateMappingConcurrently() throws Throwable {
|
||||||
// Test that we can concurrently update different indexes and types.
|
// Test that we can concurrently update different indexes and types.
|
||||||
int shardNo = Math.max(5, cluster().numNodes());
|
int shardNo = Math.max(5, cluster().size());
|
||||||
|
|
||||||
prepareCreate("test1").setSettings("index.number_of_shards", shardNo).execute().actionGet();
|
prepareCreate("test1").setSettings("index.number_of_shards", shardNo).execute().actionGet();
|
||||||
prepareCreate("test2").setSettings("index.number_of_shards", shardNo).execute().actionGet();
|
prepareCreate("test2").setSettings("index.number_of_shards", shardNo).execute().actionGet();
|
||||||
|
|
|
@ -102,7 +102,7 @@ public class SimpleDistributorTests extends ElasticsearchIntegrationTest {
|
||||||
}
|
}
|
||||||
|
|
||||||
private void createIndexWithStoreType(String index, String storeType, String distributor) {
|
private void createIndexWithStoreType(String index, String storeType, String distributor) {
|
||||||
wipeIndex(index);
|
wipeIndices(index);
|
||||||
client().admin().indices().prepareCreate(index)
|
client().admin().indices().prepareCreate(index)
|
||||||
.setSettings(settingsBuilder()
|
.setSettings(settingsBuilder()
|
||||||
.put("index.store.distributor", distributor)
|
.put("index.store.distributor", distributor)
|
||||||
|
@ -115,7 +115,7 @@ public class SimpleDistributorTests extends ElasticsearchIntegrationTest {
|
||||||
}
|
}
|
||||||
|
|
||||||
private void createIndexWithoutRateLimitingStoreType(String index, String storeType, String distributor) {
|
private void createIndexWithoutRateLimitingStoreType(String index, String storeType, String distributor) {
|
||||||
wipeIndex(index);
|
wipeIndices(index);
|
||||||
client().admin().indices().prepareCreate(index)
|
client().admin().indices().prepareCreate(index)
|
||||||
.setSettings(settingsBuilder()
|
.setSettings(settingsBuilder()
|
||||||
.put("index.store.distributor", distributor)
|
.put("index.store.distributor", distributor)
|
||||||
|
|
|
@ -96,12 +96,13 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest {
|
||||||
waitForRelocation(ClusterHealthStatus.GREEN);
|
waitForRelocation(ClusterHealthStatus.GREEN);
|
||||||
// flush, so we fetch it from the index (as see that we filter nested docs)
|
// flush, so we fetch it from the index (as see that we filter nested docs)
|
||||||
flush();
|
flush();
|
||||||
GetResponse getResponse = run(client().prepareGet("test", "type1", "1"));
|
GetResponse getResponse = client().prepareGet("test", "type1", "1").get();
|
||||||
assertThat(getResponse.isExists(), equalTo(true));
|
assertThat(getResponse.isExists(), equalTo(true));
|
||||||
assertThat(getResponse.getSourceAsBytes(), notNullValue());
|
assertThat(getResponse.getSourceAsBytes(), notNullValue());
|
||||||
|
|
||||||
// check the numDocs
|
// check the numDocs
|
||||||
IndicesStatusResponse statusResponse = run(admin().indices().prepareStatus());
|
IndicesStatusResponse statusResponse = admin().indices().prepareStatus().get();
|
||||||
|
assertNoFailures(statusResponse);
|
||||||
assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(3l));
|
assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(3l));
|
||||||
|
|
||||||
// check that _all is working on nested docs
|
// check that _all is working on nested docs
|
||||||
|
@ -111,17 +112,17 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest {
|
||||||
assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
|
assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
|
||||||
|
|
||||||
// search for something that matches the nested doc, and see that we don't find the nested doc
|
// search for something that matches the nested doc, and see that we don't find the nested doc
|
||||||
searchResponse = run(client().prepareSearch("test").setQuery(matchAllQuery()));
|
searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).get();
|
||||||
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
|
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
|
||||||
searchResponse = run(client().prepareSearch("test").setQuery(termQuery("nested1.n_field1", "n_value1_1")));
|
searchResponse = client().prepareSearch("test").setQuery(termQuery("nested1.n_field1", "n_value1_1")).get();
|
||||||
assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
|
assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
|
||||||
|
|
||||||
// now, do a nested query
|
// now, do a nested query
|
||||||
searchResponse = run(client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))));
|
searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).get();
|
||||||
assertNoFailures(searchResponse);
|
assertNoFailures(searchResponse);
|
||||||
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
|
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
|
||||||
|
|
||||||
searchResponse = run(client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).setSearchType(SearchType.DFS_QUERY_THEN_FETCH));
|
searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
|
||||||
assertNoFailures(searchResponse);
|
assertNoFailures(searchResponse);
|
||||||
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
|
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
|
||||||
|
|
||||||
|
@ -143,7 +144,7 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest {
|
||||||
waitForRelocation(ClusterHealthStatus.GREEN);
|
waitForRelocation(ClusterHealthStatus.GREEN);
|
||||||
// flush, so we fetch it from the index (as see that we filter nested docs)
|
// flush, so we fetch it from the index (as see that we filter nested docs)
|
||||||
flush();
|
flush();
|
||||||
statusResponse = run(client().admin().indices().prepareStatus());
|
statusResponse = client().admin().indices().prepareStatus().get();
|
||||||
assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(6l));
|
assertThat(statusResponse.getIndex("test").getDocs().getNumDocs(), equalTo(6l));
|
||||||
|
|
||||||
searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
|
searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
|
||||||
|
|
|
@ -284,7 +284,7 @@ public class PercolatorTests extends ElasticsearchIntegrationTest {
|
||||||
.setRefresh(true)
|
.setRefresh(true)
|
||||||
.execute().actionGet();
|
.execute().actionGet();
|
||||||
|
|
||||||
wipeIndex("test");
|
wipeIndices("test");
|
||||||
prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
|
prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
|
||||||
|
@ -1508,7 +1508,7 @@ public class PercolatorTests extends ElasticsearchIntegrationTest {
|
||||||
}
|
}
|
||||||
|
|
||||||
private CountDownLatch createCountDownLatch(String index) {
|
private CountDownLatch createCountDownLatch(String index) {
|
||||||
final CountDownLatch latch = new CountDownLatch(cluster().numNodes());
|
final CountDownLatch latch = new CountDownLatch(cluster().size());
|
||||||
Iterable<IndicesService> mapperServices = cluster().getInstances(IndicesService.class);
|
Iterable<IndicesService> mapperServices = cluster().getInstances(IndicesService.class);
|
||||||
for (IndicesService indicesService : mapperServices) {
|
for (IndicesService indicesService : mapperServices) {
|
||||||
MapperService mapperService = indicesService.indexService(index).mapperService();
|
MapperService mapperService = indicesService.indexService(index).mapperService();
|
||||||
|
|
|
@ -60,7 +60,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
|
||||||
@Slow
|
@Slow
|
||||||
public void testRestartNodePercolator1() throws Exception {
|
public void testRestartNodePercolator1() throws Exception {
|
||||||
Settings settings = settingsBuilder()
|
Settings settings = settingsBuilder()
|
||||||
.put(super.getSettings())
|
.put(super.indexSettings())
|
||||||
.put("gateway.type", "local")
|
.put("gateway.type", "local")
|
||||||
.build();
|
.build();
|
||||||
cluster().startNode(settings);
|
cluster().startNode(settings);
|
||||||
|
@ -106,7 +106,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
|
||||||
@Slow
|
@Slow
|
||||||
public void testRestartNodePercolator2() throws Exception {
|
public void testRestartNodePercolator2() throws Exception {
|
||||||
Settings settings = settingsBuilder()
|
Settings settings = settingsBuilder()
|
||||||
.put(super.getSettings())
|
.put(super.indexSettings())
|
||||||
.put("gateway.type", "local")
|
.put("gateway.type", "local")
|
||||||
.build();
|
.build();
|
||||||
cluster().startNode(settings);
|
cluster().startNode(settings);
|
||||||
|
@ -183,7 +183,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
|
||||||
@Slow
|
@Slow
|
||||||
public void testLoadingPercolateQueriesDuringCloseAndOpen() throws Exception {
|
public void testLoadingPercolateQueriesDuringCloseAndOpen() throws Exception {
|
||||||
Settings settings = settingsBuilder()
|
Settings settings = settingsBuilder()
|
||||||
.put(super.getSettings())
|
.put(super.indexSettings())
|
||||||
.put("gateway.type", "local")
|
.put("gateway.type", "local")
|
||||||
.build();
|
.build();
|
||||||
logger.info("--> Starting 2 nodes");
|
logger.info("--> Starting 2 nodes");
|
||||||
|
|
|
@ -310,7 +310,7 @@ public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
|
||||||
cluster().ensureAtLeastNumNodes(3);
|
cluster().ensureAtLeastNumNodes(3);
|
||||||
logger.info("--> creating test index ...");
|
logger.info("--> creating test index ...");
|
||||||
int allowNodes = 2;
|
int allowNodes = 2;
|
||||||
assertAcked(prepareCreate("test").setSettings(randomSettingsBuilder().put("number_of_shards", numShards).put("number_of_replicas", numReplicas).build()));
|
assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("number_of_shards", numShards).put("number_of_replicas", numReplicas).build()));
|
||||||
final AtomicLong idGenerator = new AtomicLong();
|
final AtomicLong idGenerator = new AtomicLong();
|
||||||
final AtomicLong indexCounter = new AtomicLong();
|
final AtomicLong indexCounter = new AtomicLong();
|
||||||
final AtomicBoolean stop = new AtomicBoolean(false);
|
final AtomicBoolean stop = new AtomicBoolean(false);
|
||||||
|
|
|
@ -38,7 +38,7 @@ import static org.hamcrest.Matchers.equalTo;
|
||||||
public class SimpleRecoveryTests extends ElasticsearchIntegrationTest {
|
public class SimpleRecoveryTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Settings getSettings() {
|
public Settings indexSettings() {
|
||||||
return recoverySettings();
|
return recoverySettings();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -44,7 +44,7 @@ public class AliasRoutingTests extends ElasticsearchIntegrationTest {
|
||||||
public void testAliasCrudRouting() throws Exception {
|
public void testAliasCrudRouting() throws Exception {
|
||||||
createIndex("test");
|
createIndex("test");
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
IndicesAliasesResponse res = run(admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test", "alias0").routing("0")));
|
IndicesAliasesResponse res = admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test", "alias0").routing("0")).get();
|
||||||
assertThat(res.isAcknowledged(), equalTo(true));
|
assertThat(res.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> indexing with id [1], and routing [0] using alias");
|
logger.info("--> indexing with id [1], and routing [0] using alias");
|
||||||
|
@ -125,11 +125,11 @@ public class AliasRoutingTests extends ElasticsearchIntegrationTest {
|
||||||
public void testAliasSearchRouting() throws Exception {
|
public void testAliasSearchRouting() throws Exception {
|
||||||
createIndex("test");
|
createIndex("test");
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
IndicesAliasesResponse res = run(admin().indices().prepareAliases()
|
IndicesAliasesResponse res = admin().indices().prepareAliases()
|
||||||
.addAliasAction(newAddAliasAction("test", "alias"))
|
.addAliasAction(newAddAliasAction("test", "alias"))
|
||||||
.addAliasAction(newAddAliasAction("test", "alias0").routing("0"))
|
.addAliasAction(newAddAliasAction("test", "alias0").routing("0"))
|
||||||
.addAliasAction(newAddAliasAction("test", "alias1").routing("1"))
|
.addAliasAction(newAddAliasAction("test", "alias1").routing("1"))
|
||||||
.addAliasAction(newAddAliasAction("test", "alias01").searchRouting("0,1")));
|
.addAliasAction(newAddAliasAction("test", "alias01").searchRouting("0,1")).get();
|
||||||
assertThat(res.isAcknowledged(), equalTo(true));
|
assertThat(res.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> indexing with id [1], and routing [0] using alias");
|
logger.info("--> indexing with id [1], and routing [0] using alias");
|
||||||
|
@ -222,13 +222,13 @@ public class AliasRoutingTests extends ElasticsearchIntegrationTest {
|
||||||
createIndex("test-a");
|
createIndex("test-a");
|
||||||
createIndex("test-b");
|
createIndex("test-b");
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
IndicesAliasesResponse res = run(admin().indices().prepareAliases()
|
IndicesAliasesResponse res = admin().indices().prepareAliases()
|
||||||
.addAliasAction(newAddAliasAction("test-a", "alias-a0").routing("0"))
|
.addAliasAction(newAddAliasAction("test-a", "alias-a0").routing("0"))
|
||||||
.addAliasAction(newAddAliasAction("test-a", "alias-a1").routing("1"))
|
.addAliasAction(newAddAliasAction("test-a", "alias-a1").routing("1"))
|
||||||
.addAliasAction(newAddAliasAction("test-b", "alias-b0").routing("0"))
|
.addAliasAction(newAddAliasAction("test-b", "alias-b0").routing("0"))
|
||||||
.addAliasAction(newAddAliasAction("test-b", "alias-b1").routing("1"))
|
.addAliasAction(newAddAliasAction("test-b", "alias-b1").routing("1"))
|
||||||
.addAliasAction(newAddAliasAction("test-a", "alias-ab").searchRouting("0"))
|
.addAliasAction(newAddAliasAction("test-a", "alias-ab").searchRouting("0"))
|
||||||
.addAliasAction(newAddAliasAction("test-b", "alias-ab").searchRouting("1")));
|
.addAliasAction(newAddAliasAction("test-b", "alias-ab").searchRouting("1")).get();
|
||||||
assertThat(res.isAcknowledged(), equalTo(true));
|
assertThat(res.isAcknowledged(), equalTo(true));
|
||||||
ensureGreen(); // wait for events again to make sure we got the aliases on all nodes
|
ensureGreen(); // wait for events again to make sure we got the aliases on all nodes
|
||||||
logger.info("--> indexing with id [1], and routing [0] using alias to test-a");
|
logger.info("--> indexing with id [1], and routing [0] using alias to test-a");
|
||||||
|
@ -283,8 +283,8 @@ public class AliasRoutingTests extends ElasticsearchIntegrationTest {
|
||||||
public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue2682() throws Exception {
|
public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue2682() throws Exception {
|
||||||
createIndex("index", "index_2");
|
createIndex("index", "index_2");
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
IndicesAliasesResponse res = run(admin().indices().prepareAliases()
|
IndicesAliasesResponse res = admin().indices().prepareAliases()
|
||||||
.addAliasAction(newAddAliasAction("index", "index_1").routing("1")));
|
.addAliasAction(newAddAliasAction("index", "index_1").routing("1")).get();
|
||||||
assertThat(res.isAcknowledged(), equalTo(true));
|
assertThat(res.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
|
logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
|
||||||
|
@ -310,8 +310,8 @@ public class AliasRoutingTests extends ElasticsearchIntegrationTest {
|
||||||
public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue3268() throws Exception {
|
public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue3268() throws Exception {
|
||||||
createIndex("index", "index_2");
|
createIndex("index", "index_2");
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
IndicesAliasesResponse res = run(admin().indices().prepareAliases()
|
IndicesAliasesResponse res = admin().indices().prepareAliases()
|
||||||
.addAliasAction(newAddAliasAction("index", "index_1").routing("1")));
|
.addAliasAction(newAddAliasAction("index", "index_1").routing("1")).get();
|
||||||
assertThat(res.isAcknowledged(), equalTo(true));
|
assertThat(res.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
|
logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
|
||||||
|
@ -330,10 +330,10 @@ public class AliasRoutingTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testRequiredRoutingMappingWithAlias() throws Exception {
|
public void testRequiredRoutingMappingWithAlias() throws Exception {
|
||||||
run(prepareCreate("test").addMapping(
|
prepareCreate("test").addMapping(
|
||||||
"type1",
|
"type1",
|
||||||
XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true)
|
XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true)
|
||||||
.endObject().endObject().endObject()));
|
.endObject().endObject().endObject()).get();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
logger.info("--> indexing with id [1], and routing [0]");
|
logger.info("--> indexing with id [1], and routing [0]");
|
||||||
client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
|
client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
|
||||||
|
@ -377,8 +377,8 @@ public class AliasRoutingTests extends ElasticsearchIntegrationTest {
|
||||||
createIndex("test");
|
createIndex("test");
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
logger.info("--> creating alias with routing [3]");
|
logger.info("--> creating alias with routing [3]");
|
||||||
IndicesAliasesResponse res = run(admin().indices().prepareAliases()
|
IndicesAliasesResponse res = admin().indices().prepareAliases()
|
||||||
.addAliasAction(newAddAliasAction("test", "alias").routing("3")));
|
.addAliasAction(newAddAliasAction("test", "alias").routing("3")).get();
|
||||||
assertThat(res.isAcknowledged(), equalTo(true));
|
assertThat(res.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> indexing with id [0], and routing [3]");
|
logger.info("--> indexing with id [0], and routing [3]");
|
||||||
|
@ -393,8 +393,8 @@ public class AliasRoutingTests extends ElasticsearchIntegrationTest {
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.info("--> creating alias with routing [4]");
|
logger.info("--> creating alias with routing [4]");
|
||||||
res = run(admin().indices().prepareAliases()
|
res = admin().indices().prepareAliases()
|
||||||
.addAliasAction(newAddAliasAction("test", "alias").routing("4")));
|
.addAliasAction(newAddAliasAction("test", "alias").routing("4")).get();
|
||||||
assertThat(res.isAcknowledged(), equalTo(true));
|
assertThat(res.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> verifying search with wrong routing should not find");
|
logger.info("--> verifying search with wrong routing should not find");
|
||||||
|
|
|
@ -106,7 +106,7 @@ public class SearchWhileCreatingIndexTests extends ElasticsearchIntegrationTest
|
||||||
status = client().admin().cluster().prepareHealth("test").get().getStatus();
|
status = client().admin().cluster().prepareHealth("test").get().getStatus();
|
||||||
cluster().ensureAtLeastNumNodes(numberOfReplicas + 1);
|
cluster().ensureAtLeastNumNodes(numberOfReplicas + 1);
|
||||||
}
|
}
|
||||||
wipeIndex("test");
|
wipeIndices("test");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -4,6 +4,7 @@ import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||||
import org.elasticsearch.action.search.SearchResponse;
|
import org.elasticsearch.action.search.SearchResponse;
|
||||||
import org.elasticsearch.common.collect.Tuple;
|
import org.elasticsearch.common.collect.Tuple;
|
||||||
import org.elasticsearch.common.regex.Regex;
|
import org.elasticsearch.common.regex.Regex;
|
||||||
|
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.text.StringText;
|
import org.elasticsearch.common.text.StringText;
|
||||||
import org.elasticsearch.common.text.Text;
|
import org.elasticsearch.common.text.Text;
|
||||||
|
@ -24,8 +25,8 @@ import static org.hamcrest.Matchers.equalTo;
|
||||||
public class ExtendedFacetsTests extends ElasticsearchIntegrationTest {
|
public class ExtendedFacetsTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Settings getSettings() {
|
public Settings indexSettings() {
|
||||||
return randomSettingsBuilder()
|
return ImmutableSettings.builder()
|
||||||
.put("index.number_of_shards", numberOfShards())
|
.put("index.number_of_shards", numberOfShards())
|
||||||
.put("index.number_of_replicas", 0)
|
.put("index.number_of_replicas", 0)
|
||||||
.build();
|
.build();
|
||||||
|
|
|
@ -29,6 +29,7 @@ import org.elasticsearch.client.Client;
|
||||||
import org.elasticsearch.common.Priority;
|
import org.elasticsearch.common.Priority;
|
||||||
import org.elasticsearch.common.bytes.BytesArray;
|
import org.elasticsearch.common.bytes.BytesArray;
|
||||||
import org.elasticsearch.common.joda.Joda;
|
import org.elasticsearch.common.joda.Joda;
|
||||||
|
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.unit.TimeValue;
|
import org.elasticsearch.common.unit.TimeValue;
|
||||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||||
|
@ -69,8 +70,8 @@ public class SimpleFacetsTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
private int numRuns = -1;
|
private int numRuns = -1;
|
||||||
@Override
|
@Override
|
||||||
public Settings getSettings() {
|
public Settings indexSettings() {
|
||||||
return randomSettingsBuilder()
|
return ImmutableSettings.builder()
|
||||||
.put("index.number_of_shards", between(1, 5))
|
.put("index.number_of_shards", between(1, 5))
|
||||||
.put("index.number_of_replicas", 0)
|
.put("index.number_of_replicas", 0)
|
||||||
.build();
|
.build();
|
||||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.search.facet.terms;
|
||||||
|
|
||||||
import com.google.common.collect.ImmutableMap;
|
import com.google.common.collect.ImmutableMap;
|
||||||
import org.elasticsearch.action.search.SearchResponse;
|
import org.elasticsearch.action.search.SearchResponse;
|
||||||
|
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.search.facet.Facets;
|
import org.elasticsearch.search.facet.Facets;
|
||||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||||
|
@ -51,7 +52,7 @@ public class ShardSizeTermsFacetTests extends ElasticsearchIntegrationTest {
|
||||||
*/
|
*/
|
||||||
@Override
|
@Override
|
||||||
protected Settings nodeSettings(int nodeOrdinal) {
|
protected Settings nodeSettings(int nodeOrdinal) {
|
||||||
return randomSettingsBuilder()
|
return ImmutableSettings.builder()
|
||||||
.put("index.number_of_shards", 2)
|
.put("index.number_of_shards", 2)
|
||||||
.put("index.number_of_replicas", 0)
|
.put("index.number_of_replicas", 0)
|
||||||
.put("cluster.routing.operation.hash.type", "djb")
|
.put("cluster.routing.operation.hash.type", "djb")
|
||||||
|
|
|
@ -22,6 +22,7 @@ package org.elasticsearch.search.facet.terms;
|
||||||
import org.elasticsearch.ElasticSearchException;
|
import org.elasticsearch.ElasticSearchException;
|
||||||
import org.elasticsearch.action.search.SearchResponse;
|
import org.elasticsearch.action.search.SearchResponse;
|
||||||
import org.elasticsearch.common.Priority;
|
import org.elasticsearch.common.Priority;
|
||||||
|
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
@ -41,8 +42,8 @@ import static org.hamcrest.Matchers.is;
|
||||||
public class UnmappedFieldsTermsFacetsTests extends ElasticsearchIntegrationTest {
|
public class UnmappedFieldsTermsFacetsTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Settings getSettings() {
|
public Settings indexSettings() {
|
||||||
return randomSettingsBuilder()
|
return ImmutableSettings.builder()
|
||||||
.put("index.number_of_shards", numberOfShards())
|
.put("index.number_of_shards", numberOfShards())
|
||||||
.put("index.number_of_replicas", 0)
|
.put("index.number_of_replicas", 0)
|
||||||
.build();
|
.build();
|
||||||
|
@ -157,7 +158,7 @@ public class UnmappedFieldsTermsFacetsTests extends ElasticsearchIntegrationTest
|
||||||
@Test
|
@Test
|
||||||
public void testPartiallyUnmappedField() throws ElasticSearchException, IOException {
|
public void testPartiallyUnmappedField() throws ElasticSearchException, IOException {
|
||||||
client().admin().indices().prepareCreate("mapped_idx")
|
client().admin().indices().prepareCreate("mapped_idx")
|
||||||
.setSettings(getSettings())
|
.setSettings(indexSettings())
|
||||||
.addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
|
.addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||||
.startObject("partially_mapped_byte").field("type", "byte").endObject()
|
.startObject("partially_mapped_byte").field("type", "byte").endObject()
|
||||||
.startObject("partially_mapped_short").field("type", "short").endObject()
|
.startObject("partially_mapped_short").field("type", "short").endObject()
|
||||||
|
@ -285,7 +286,7 @@ public class UnmappedFieldsTermsFacetsTests extends ElasticsearchIntegrationTest
|
||||||
@Test
|
@Test
|
||||||
public void testMappedYetMissingField() throws IOException {
|
public void testMappedYetMissingField() throws IOException {
|
||||||
client().admin().indices().prepareCreate("idx")
|
client().admin().indices().prepareCreate("idx")
|
||||||
.setSettings(getSettings())
|
.setSettings(indexSettings())
|
||||||
.addMapping("type", jsonBuilder().startObject()
|
.addMapping("type", jsonBuilder().startObject()
|
||||||
.field("type").startObject()
|
.field("type").startObject()
|
||||||
.field("properties").startObject()
|
.field("properties").startObject()
|
||||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.search.facet.termsstats;
|
||||||
|
|
||||||
import com.google.common.collect.ImmutableMap;
|
import com.google.common.collect.ImmutableMap;
|
||||||
import org.elasticsearch.action.search.SearchResponse;
|
import org.elasticsearch.action.search.SearchResponse;
|
||||||
|
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.search.facet.Facets;
|
import org.elasticsearch.search.facet.Facets;
|
||||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||||
|
@ -51,7 +52,7 @@ public class ShardSizeTermsStatsFacetTests extends ElasticsearchIntegrationTest
|
||||||
*/
|
*/
|
||||||
@Override
|
@Override
|
||||||
protected Settings nodeSettings(int nodeOrdinal) {
|
protected Settings nodeSettings(int nodeOrdinal) {
|
||||||
return randomSettingsBuilder()
|
return ImmutableSettings.builder()
|
||||||
.put("index.number_of_shards", 2)
|
.put("index.number_of_shards", 2)
|
||||||
.put("index.number_of_replicas", 0)
|
.put("index.number_of_replicas", 0)
|
||||||
.put("cluster.routing.operation.hash.type", "djb")
|
.put("cluster.routing.operation.hash.type", "djb")
|
||||||
|
|
|
@ -26,6 +26,7 @@ import org.elasticsearch.common.bytes.BytesArray;
|
||||||
import org.elasticsearch.common.bytes.BytesReference;
|
import org.elasticsearch.common.bytes.BytesReference;
|
||||||
import org.elasticsearch.common.collect.MapBuilder;
|
import org.elasticsearch.common.collect.MapBuilder;
|
||||||
import org.elasticsearch.common.joda.Joda;
|
import org.elasticsearch.common.joda.Joda;
|
||||||
|
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||||
import org.elasticsearch.search.sort.SortOrder;
|
import org.elasticsearch.search.sort.SortOrder;
|
||||||
|
@ -50,8 +51,8 @@ import static org.hamcrest.Matchers.*;
|
||||||
public class SearchFieldsTests extends ElasticsearchIntegrationTest {
|
public class SearchFieldsTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Settings getSettings() {
|
public Settings indexSettings() {
|
||||||
return randomSettingsBuilder()
|
return ImmutableSettings.builder()
|
||||||
.put("index.number_of_shards", 1) // why just one?
|
.put("index.number_of_shards", 1) // why just one?
|
||||||
.put("index.number_of_replicas", 0)
|
.put("index.number_of_replicas", 0)
|
||||||
.build();
|
.build();
|
||||||
|
|
|
@ -36,7 +36,7 @@ public class SearchPreferenceTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Test // see #2896
|
@Test // see #2896
|
||||||
public void testStopOneNodePreferenceWithRedState() throws InterruptedException {
|
public void testStopOneNodePreferenceWithRedState() throws InterruptedException {
|
||||||
client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", cluster().numNodes()+2).put("index.number_of_replicas", 0)).execute().actionGet();
|
client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", cluster().size()+2).put("index.number_of_replicas", 0)).execute().actionGet();
|
||||||
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
|
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
|
||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
client().prepareIndex("test", "type1", ""+i).setSource("field1", "value1").execute().actionGet();
|
client().prepareIndex("test", "type1", ""+i).setSource("field1", "value1").execute().actionGet();
|
||||||
|
|
|
@ -62,7 +62,7 @@ public class SimpleQueryTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Test // see https://github.com/elasticsearch/elasticsearch/issues/3177
|
@Test // see https://github.com/elasticsearch/elasticsearch/issues/3177
|
||||||
public void testIssue3177() {
|
public void testIssue3177() {
|
||||||
run(prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)));
|
prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).get();
|
||||||
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();
|
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();
|
||||||
client().prepareIndex("test", "type1", "2").setSource("field1", "value2").execute().actionGet();
|
client().prepareIndex("test", "type1", "2").setSource("field1", "value2").execute().actionGet();
|
||||||
client().prepareIndex("test", "type1", "3").setSource("field1", "value3").execute().actionGet();
|
client().prepareIndex("test", "type1", "3").setSource("field1", "value3").execute().actionGet();
|
||||||
|
|
|
@ -59,8 +59,8 @@ import static org.hamcrest.Matchers.*;
|
||||||
public class SimpleSortTests extends ElasticsearchIntegrationTest {
|
public class SimpleSortTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Settings getSettings() {
|
public Settings indexSettings() {
|
||||||
return randomSettingsBuilder()
|
return ImmutableSettings.builder()
|
||||||
.put("index.number_of_shards", 3)
|
.put("index.number_of_shards", 3)
|
||||||
.put("index.number_of_replicas", 0)
|
.put("index.number_of_replicas", 0)
|
||||||
.build();
|
.build();
|
||||||
|
@ -111,7 +111,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest {
|
||||||
int numberOfShards = between(1, 10);
|
int numberOfShards = between(1, 10);
|
||||||
Random random = getRandom();
|
Random random = getRandom();
|
||||||
prepareCreate("test")
|
prepareCreate("test")
|
||||||
.setSettings(randomSettingsBuilder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", 0))
|
.setSettings(ImmutableSettings.builder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", 0))
|
||||||
.addMapping("type",
|
.addMapping("type",
|
||||||
XContentFactory.jsonBuilder()
|
XContentFactory.jsonBuilder()
|
||||||
.startObject()
|
.startObject()
|
||||||
|
@ -236,7 +236,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testScoreSortDirection() throws Exception {
|
public void testScoreSortDirection() throws Exception {
|
||||||
prepareCreate("test").setSettings(randomSettingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
|
prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1)).execute().actionGet();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
|
||||||
client().prepareIndex("test", "type", "1").setSource("field", 2).execute().actionGet();
|
client().prepareIndex("test", "type", "1").setSource("field", 2).execute().actionGet();
|
||||||
|
@ -268,7 +268,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testScoreSortDirection_withFunctionScore() throws Exception {
|
public void testScoreSortDirection_withFunctionScore() throws Exception {
|
||||||
prepareCreate("test").setSettings(randomSettingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
|
prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1)).execute().actionGet();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
|
||||||
client().prepareIndex("test", "type", "1").setSource("field", 2).execute().actionGet();
|
client().prepareIndex("test", "type", "1").setSource("field", 2).execute().actionGet();
|
||||||
|
@ -299,7 +299,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testIssue2986() {
|
public void testIssue2986() {
|
||||||
prepareCreate("test").setSettings(getSettings()).execute().actionGet();
|
prepareCreate("test").setSettings(indexSettings()).execute().actionGet();
|
||||||
|
|
||||||
client().prepareIndex("test", "post", "1").setSource("{\"field1\":\"value1\"}").execute().actionGet();
|
client().prepareIndex("test", "post", "1").setSource("{\"field1\":\"value1\"}").execute().actionGet();
|
||||||
client().prepareIndex("test", "post", "2").setSource("{\"field1\":\"value2\"}").execute().actionGet();
|
client().prepareIndex("test", "post", "2").setSource("{\"field1\":\"value2\"}").execute().actionGet();
|
||||||
|
@ -353,7 +353,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest {
|
||||||
final int numberOfShards = between(1, 10);
|
final int numberOfShards = between(1, 10);
|
||||||
Random random = getRandom();
|
Random random = getRandom();
|
||||||
prepareCreate("test")
|
prepareCreate("test")
|
||||||
.setSettings(randomSettingsBuilder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", 0))
|
.setSettings(ImmutableSettings.builder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", 0))
|
||||||
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
|
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
|
||||||
.startObject("str_value").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
|
.startObject("str_value").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
|
||||||
.startObject("boolean_value").field("type", "boolean").endObject()
|
.startObject("boolean_value").field("type", "boolean").endObject()
|
||||||
|
@ -671,7 +671,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest {
|
||||||
.startObject("svalue").field("type", "string").endObject()
|
.startObject("svalue").field("type", "string").endObject()
|
||||||
.startObject("gvalue").field("type", "geo_point").endObject()
|
.startObject("gvalue").field("type", "geo_point").endObject()
|
||||||
.endObject().endObject().endObject().string();
|
.endObject().endObject().endObject().string();
|
||||||
prepareCreate("test").setSettings(getSettings()).addMapping("type1", mapping).execute().actionGet();
|
prepareCreate("test").setSettings(indexSettings()).addMapping("type1", mapping).execute().actionGet();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
|
||||||
for (int i = 0; i < 10; i++) {
|
for (int i = 0; i < 10; i++) {
|
||||||
|
@ -761,7 +761,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest {
|
||||||
String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
|
String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
|
||||||
.startObject("svalue").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
|
.startObject("svalue").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
|
||||||
.endObject().endObject().endObject().string();
|
.endObject().endObject().endObject().string();
|
||||||
prepareCreate("test").setSettings(getSettings()).addMapping("type1", mapping).execute().actionGet();
|
prepareCreate("test").setSettings(indexSettings()).addMapping("type1", mapping).execute().actionGet();
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
|
||||||
client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
|
client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
|
||||||
|
@ -1047,7 +1047,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest {
|
||||||
@Test
|
@Test
|
||||||
public void testSortMVField() throws Exception {
|
public void testSortMVField() throws Exception {
|
||||||
prepareCreate("test")
|
prepareCreate("test")
|
||||||
.setSettings(randomSettingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
|
.setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
|
||||||
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
|
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
|
||||||
.startObject("long_values").field("type", "long").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
|
.startObject("long_values").field("type", "long").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
|
||||||
.startObject("int_values").field("type", "integer").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
|
.startObject("int_values").field("type", "integer").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
|
||||||
|
@ -1365,7 +1365,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest {
|
||||||
@Test
|
@Test
|
||||||
public void testSortOnRareField() throws ElasticSearchException, IOException {
|
public void testSortOnRareField() throws ElasticSearchException, IOException {
|
||||||
prepareCreate("test")
|
prepareCreate("test")
|
||||||
.setSettings(randomSettingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
|
.setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
|
||||||
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
|
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
|
||||||
.startObject("string_values").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
|
.startObject("string_values").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", maybeDocValues() ? "doc_values" : null).endObject().endObject()
|
||||||
.endObject().endObject().endObject())
|
.endObject().endObject().endObject())
|
||||||
|
|
|
@ -24,6 +24,11 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
|
||||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
|
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
|
||||||
import org.elasticsearch.action.search.SearchResponse;
|
import org.elasticsearch.action.search.SearchResponse;
|
||||||
import org.elasticsearch.action.search.SearchType;
|
import org.elasticsearch.action.search.SearchType;
|
||||||
|
import org.elasticsearch.cluster.ClusterState;
|
||||||
|
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
||||||
|
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||||
|
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||||
|
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.unit.TimeValue;
|
import org.elasticsearch.common.unit.TimeValue;
|
||||||
import org.elasticsearch.index.query.QueryBuilders;
|
import org.elasticsearch.index.query.QueryBuilders;
|
||||||
|
@ -31,6 +36,7 @@ import org.elasticsearch.index.search.stats.SearchStats.Stats;
|
||||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import java.util.HashSet;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
|
||||||
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
|
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
|
||||||
|
@ -41,8 +47,8 @@ import static org.hamcrest.Matchers.*;
|
||||||
public class SearchStatsTests extends ElasticsearchIntegrationTest {
|
public class SearchStatsTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Settings getSettings() {
|
public Settings indexSettings() {
|
||||||
return randomSettingsBuilder()
|
return ImmutableSettings.builder()
|
||||||
.put("index.number_of_replicas", 0)
|
.put("index.number_of_replicas", 0)
|
||||||
.build();
|
.build();
|
||||||
}
|
}
|
||||||
|
@ -103,6 +109,21 @@ public class SearchStatsTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private Set<String> nodeIdsWithIndex(String... indices) {
|
||||||
|
ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
|
||||||
|
GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
|
||||||
|
Set<String> nodes = new HashSet<String>();
|
||||||
|
for (ShardIterator shardIterator : allAssignedShardsGrouped) {
|
||||||
|
for (ShardRouting routing : shardIterator.asUnordered()) {
|
||||||
|
if (routing.active()) {
|
||||||
|
nodes.add(routing.currentNodeId());
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes;
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testOpenContexts() {
|
public void testOpenContexts() {
|
||||||
createIndex("test1");
|
createIndex("test1");
|
||||||
|
@ -128,4 +149,10 @@ public class SearchStatsTests extends ElasticsearchIntegrationTest {
|
||||||
indicesStats = client().admin().indices().prepareStats().execute().actionGet();
|
indicesStats = client().admin().indices().prepareStats().execute().actionGet();
|
||||||
assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0l));
|
assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0l));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
protected int numAssignedShards(String... indices) {
|
||||||
|
ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
|
||||||
|
GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
|
||||||
|
return allAssignedShardsGrouped.size();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -761,7 +761,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
private ImmutableSettings.Builder createDefaultSettings() {
|
private ImmutableSettings.Builder createDefaultSettings() {
|
||||||
int randomShardNumber = between(1, 5);
|
int randomShardNumber = between(1, 5);
|
||||||
int randomReplicaNumber = between(0, cluster().numNodes() - 1);
|
int randomReplicaNumber = between(0, cluster().size() - 1);
|
||||||
return settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, randomShardNumber).put(SETTING_NUMBER_OF_REPLICAS, randomReplicaNumber);
|
return settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, randomShardNumber).put(SETTING_NUMBER_OF_REPLICAS, randomReplicaNumber);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -142,7 +142,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest {
|
||||||
public void testUnmappedField() throws IOException, InterruptedException, ExecutionException {
|
public void testUnmappedField() throws IOException, InterruptedException, ExecutionException {
|
||||||
CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
|
CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
|
||||||
.put(SETTING_NUMBER_OF_SHARDS, between(1,5))
|
.put(SETTING_NUMBER_OF_SHARDS, between(1,5))
|
||||||
.put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().numNodes() - 1))
|
.put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().size() - 1))
|
||||||
.put("index.analysis.analyzer.biword.tokenizer", "standard")
|
.put("index.analysis.analyzer.biword.tokenizer", "standard")
|
||||||
.putArray("index.analysis.analyzer.biword.filter", "shingler", "lowercase")
|
.putArray("index.analysis.analyzer.biword.filter", "shingler", "lowercase")
|
||||||
.put("index.analysis.filter.shingler.type", "shingle")
|
.put("index.analysis.filter.shingler.type", "shingle")
|
||||||
|
@ -690,7 +690,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest {
|
||||||
public void testShardFailures() throws IOException, InterruptedException {
|
public void testShardFailures() throws IOException, InterruptedException {
|
||||||
CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
|
CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
|
||||||
.put(SETTING_NUMBER_OF_SHARDS, between(1, 5))
|
.put(SETTING_NUMBER_OF_SHARDS, between(1, 5))
|
||||||
.put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().numNodes() - 1))
|
.put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().size() - 1))
|
||||||
.put("index.analysis.analyzer.suggest.tokenizer", "standard")
|
.put("index.analysis.analyzer.suggest.tokenizer", "standard")
|
||||||
.putArray("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler")
|
.putArray("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler")
|
||||||
.put("index.analysis.filter.shingler.type", "shingle")
|
.put("index.analysis.filter.shingler.type", "shingle")
|
||||||
|
@ -798,7 +798,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
|
CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
|
||||||
.put(SETTING_NUMBER_OF_SHARDS, numberOfShards)
|
.put(SETTING_NUMBER_OF_SHARDS, numberOfShards)
|
||||||
.put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().numNodes() - 1))
|
.put(SETTING_NUMBER_OF_REPLICAS, between(0, cluster().size() - 1))
|
||||||
.put("index.analysis.analyzer.body.tokenizer", "standard")
|
.put("index.analysis.analyzer.body.tokenizer", "standard")
|
||||||
.putArray("index.analysis.analyzer.body.filter", "lowercase", "my_shingle")
|
.putArray("index.analysis.analyzer.body.filter", "lowercase", "my_shingle")
|
||||||
.put("index.analysis.filter.my_shingle.type", "shingle")
|
.put("index.analysis.filter.my_shingle.type", "shingle")
|
||||||
|
|
|
@ -20,6 +20,7 @@
|
||||||
package org.elasticsearch.search.timeout;
|
package org.elasticsearch.search.timeout;
|
||||||
|
|
||||||
import org.elasticsearch.action.search.SearchResponse;
|
import org.elasticsearch.action.search.SearchResponse;
|
||||||
|
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
@ -34,8 +35,8 @@ import static org.hamcrest.Matchers.equalTo;
|
||||||
public class SearchTimeoutTests extends ElasticsearchIntegrationTest {
|
public class SearchTimeoutTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Settings getSettings() {
|
public Settings indexSettings() {
|
||||||
return randomSettingsBuilder()
|
return ImmutableSettings.builder()
|
||||||
.put("index.number_of_shards", 2)
|
.put("index.number_of_shards", 2)
|
||||||
.put("index.number_of_replicas", 0)
|
.put("index.number_of_replicas", 0)
|
||||||
.build();
|
.build();
|
||||||
|
|
|
@ -104,7 +104,7 @@ public abstract class AbstractSnapshotTests extends ElasticsearchIntegrationTest
|
||||||
public String waitForCompletionOrBlock(Collection<String> nodes, String repository, String snapshot, TimeValue timeout) throws InterruptedException {
|
public String waitForCompletionOrBlock(Collection<String> nodes, String repository, String snapshot, TimeValue timeout) throws InterruptedException {
|
||||||
long start = System.currentTimeMillis();
|
long start = System.currentTimeMillis();
|
||||||
while (System.currentTimeMillis() - start < timeout.millis()) {
|
while (System.currentTimeMillis() - start < timeout.millis()) {
|
||||||
ImmutableList<SnapshotInfo> snapshotInfos = run(client().admin().cluster().prepareGetSnapshots(repository).setSnapshots(snapshot)).getSnapshots();
|
ImmutableList<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots(repository).setSnapshots(snapshot).get().getSnapshots();
|
||||||
assertThat(snapshotInfos.size(), equalTo(1));
|
assertThat(snapshotInfos.size(), equalTo(1));
|
||||||
if (snapshotInfos.get(0).state().completed()) {
|
if (snapshotInfos.get(0).state().completed()) {
|
||||||
return null;
|
return null;
|
||||||
|
@ -124,7 +124,7 @@ public abstract class AbstractSnapshotTests extends ElasticsearchIntegrationTest
|
||||||
public SnapshotInfo waitForCompletion(String repository, String snapshot, TimeValue timeout) throws InterruptedException {
|
public SnapshotInfo waitForCompletion(String repository, String snapshot, TimeValue timeout) throws InterruptedException {
|
||||||
long start = System.currentTimeMillis();
|
long start = System.currentTimeMillis();
|
||||||
while (System.currentTimeMillis() - start < timeout.millis()) {
|
while (System.currentTimeMillis() - start < timeout.millis()) {
|
||||||
ImmutableList<SnapshotInfo> snapshotInfos = run(client().admin().cluster().prepareGetSnapshots(repository).setSnapshots(snapshot)).getSnapshots();
|
ImmutableList<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots(repository).setSnapshots(snapshot).get().getSnapshots();
|
||||||
assertThat(snapshotInfos.size(), equalTo(1));
|
assertThat(snapshotInfos.size(), equalTo(1));
|
||||||
if (snapshotInfos.get(0).state().completed()) {
|
if (snapshotInfos.get(0).state().completed()) {
|
||||||
return snapshotInfos.get(0);
|
return snapshotInfos.get(0);
|
||||||
|
|
|
@ -49,7 +49,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||||
public void restorePersistentSettingsTest() throws Exception {
|
public void restorePersistentSettingsTest() throws Exception {
|
||||||
logger.info("--> start node");
|
logger.info("--> start node");
|
||||||
cluster().startNode(settingsBuilder().put("gateway.type", "local"));
|
cluster().startNode(settingsBuilder().put("gateway.type", "local"));
|
||||||
Client client = cluster().client();
|
Client client = client();
|
||||||
|
|
||||||
// Add dummy persistent setting
|
// Add dummy persistent setting
|
||||||
logger.info("--> set test persistent setting");
|
logger.info("--> set test persistent setting");
|
||||||
|
@ -87,7 +87,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||||
ArrayList<String> nodes = newArrayList();
|
ArrayList<String> nodes = newArrayList();
|
||||||
nodes.add(cluster().startNode());
|
nodes.add(cluster().startNode());
|
||||||
nodes.add(cluster().startNode());
|
nodes.add(cluster().startNode());
|
||||||
Client client = cluster().client();
|
Client client = client();
|
||||||
|
|
||||||
assertAcked(prepareCreate("test-idx", 2, settingsBuilder().put("number_of_shards", 2).put("number_of_replicas", 0).put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false)));
|
assertAcked(prepareCreate("test-idx", 2, settingsBuilder().put("number_of_shards", 2).put("number_of_replicas", 0).put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false)));
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
|
@ -97,22 +97,22 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||||
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> create repository");
|
logger.info("--> create repository");
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||||
ImmutableSettings.settingsBuilder()
|
ImmutableSettings.settingsBuilder()
|
||||||
.put("location", newTempDir(LifecycleScope.TEST))
|
.put("location", newTempDir(LifecycleScope.TEST))
|
||||||
.put("random", randomAsciiOfLength(10))
|
.put("random", randomAsciiOfLength(10))
|
||||||
.put("random_data_file_blocking_rate", 0.1)
|
.put("random_data_file_blocking_rate", 0.1)
|
||||||
.put("wait_after_unblock", 200)
|
.put("wait_after_unblock", 200)
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx"));
|
client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
|
||||||
|
|
||||||
String blockedNode = waitForCompletionOrBlock(nodes, "test-repo", "test-snap", TimeValue.timeValueSeconds(60));
|
String blockedNode = waitForCompletionOrBlock(nodes, "test-repo", "test-snap", TimeValue.timeValueSeconds(60));
|
||||||
if (blockedNode != null) {
|
if (blockedNode != null) {
|
||||||
|
@ -126,7 +126,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
|
||||||
logger.info("--> done");
|
logger.info("--> done");
|
||||||
} else {
|
} else {
|
||||||
logger.info("--> done without blocks");
|
logger.info("--> done without blocks");
|
||||||
ImmutableList<SnapshotInfo> snapshotInfos = run(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap")).getSnapshots();
|
ImmutableList<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots();
|
||||||
assertThat(snapshotInfos.size(), equalTo(1));
|
assertThat(snapshotInfos.size(), equalTo(1));
|
||||||
assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
|
assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
|
||||||
assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
|
assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
|
||||||
|
|
|
@ -44,14 +44,14 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||||
Client client = client();
|
Client client = client();
|
||||||
|
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo-1")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo-1")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||||
.put("location", newTempDir(LifecycleScope.SUITE))
|
.put("location", newTempDir(LifecycleScope.SUITE))
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> check that repository is really there");
|
logger.info("--> check that repository is really there");
|
||||||
ClusterStateResponse clusterStateResponse = run(client.admin().cluster().prepareState().setFilterAll().setFilterMetaData(false));
|
ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().setFilterAll().setFilterMetaData(false).get();
|
||||||
MetaData metaData = clusterStateResponse.getState().getMetaData();
|
MetaData metaData = clusterStateResponse.getState().getMetaData();
|
||||||
RepositoriesMetaData repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE);
|
RepositoriesMetaData repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE);
|
||||||
assertThat(repositoriesMetaData, notNullValue());
|
assertThat(repositoriesMetaData, notNullValue());
|
||||||
|
@ -59,14 +59,14 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||||
assertThat(repositoriesMetaData.repository("test-repo-1").type(), equalTo("fs"));
|
assertThat(repositoriesMetaData.repository("test-repo-1").type(), equalTo("fs"));
|
||||||
|
|
||||||
logger.info("--> creating anoter repository");
|
logger.info("--> creating anoter repository");
|
||||||
putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo-2")
|
putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo-2")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||||
.put("location", newTempDir(LifecycleScope.SUITE))
|
.put("location", newTempDir(LifecycleScope.SUITE))
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> check that both repositories are in cluster state");
|
logger.info("--> check that both repositories are in cluster state");
|
||||||
clusterStateResponse = run(client.admin().cluster().prepareState().setFilterAll().setFilterMetaData(false));
|
clusterStateResponse = client.admin().cluster().prepareState().setFilterAll().setFilterMetaData(false).get();
|
||||||
metaData = clusterStateResponse.getState().getMetaData();
|
metaData = clusterStateResponse.getState().getMetaData();
|
||||||
repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE);
|
repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE);
|
||||||
assertThat(repositoriesMetaData, notNullValue());
|
assertThat(repositoriesMetaData, notNullValue());
|
||||||
|
@ -77,20 +77,20 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||||
assertThat(repositoriesMetaData.repository("test-repo-2").type(), equalTo("fs"));
|
assertThat(repositoriesMetaData.repository("test-repo-2").type(), equalTo("fs"));
|
||||||
|
|
||||||
logger.info("--> check that both repositories can be retrieved by getRepositories query");
|
logger.info("--> check that both repositories can be retrieved by getRepositories query");
|
||||||
GetRepositoriesResponse repositoriesResponse = run(client.admin().cluster().prepareGetRepositories());
|
GetRepositoriesResponse repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
|
||||||
assertThat(repositoriesResponse.repositories().size(), equalTo(2));
|
assertThat(repositoriesResponse.repositories().size(), equalTo(2));
|
||||||
assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue());
|
assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue());
|
||||||
assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
|
assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
|
||||||
|
|
||||||
logger.info("--> delete repository test-repo-1");
|
logger.info("--> delete repository test-repo-1");
|
||||||
run(client.admin().cluster().prepareDeleteRepository("test-repo-1"));
|
client.admin().cluster().prepareDeleteRepository("test-repo-1").get();
|
||||||
repositoriesResponse = run(client.admin().cluster().prepareGetRepositories());
|
repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
|
||||||
assertThat(repositoriesResponse.repositories().size(), equalTo(1));
|
assertThat(repositoriesResponse.repositories().size(), equalTo(1));
|
||||||
assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
|
assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
|
||||||
|
|
||||||
logger.info("--> delete repository test-repo-2");
|
logger.info("--> delete repository test-repo-2");
|
||||||
run(client.admin().cluster().prepareDeleteRepository("test-repo-2"));
|
client.admin().cluster().prepareDeleteRepository("test-repo-2").get();
|
||||||
repositoriesResponse = run(client.admin().cluster().prepareGetRepositories());
|
repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
|
||||||
assertThat(repositoriesResponse.repositories().size(), equalTo(0));
|
assertThat(repositoriesResponse.repositories().size(), equalTo(0));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -109,7 +109,7 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||||
|
|
||||||
logger.info("--> trying creating repository with incorrect settings");
|
logger.info("--> trying creating repository with incorrect settings");
|
||||||
try {
|
try {
|
||||||
run(client.admin().cluster().preparePutRepository("test-repo").setType("fs"));
|
client.admin().cluster().preparePutRepository("test-repo").setType("fs").get();
|
||||||
fail("Shouldn't be here");
|
fail("Shouldn't be here");
|
||||||
} catch (RepositoryException ex) {
|
} catch (RepositoryException ex) {
|
||||||
// Expected
|
// Expected
|
||||||
|
@ -120,31 +120,31 @@ public class RepositoriesTests extends AbstractSnapshotTests {
|
||||||
public void repositoryAckTimeoutTest() throws Exception {
|
public void repositoryAckTimeoutTest() throws Exception {
|
||||||
|
|
||||||
logger.info("--> creating repository test-repo-1 with 0s timeout - shouldn't ack");
|
logger.info("--> creating repository test-repo-1 with 0s timeout - shouldn't ack");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client().admin().cluster().preparePutRepository("test-repo-1")
|
PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo-1")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||||
.put("location", newTempDir(LifecycleScope.SUITE))
|
.put("location", newTempDir(LifecycleScope.SUITE))
|
||||||
.put("compress", randomBoolean())
|
.put("compress", randomBoolean())
|
||||||
.put("chunk_size", randomIntBetween(5, 100))
|
.put("chunk_size", randomIntBetween(5, 100))
|
||||||
)
|
)
|
||||||
.setTimeout("0s"));
|
.setTimeout("0s").get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(false));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(false));
|
||||||
|
|
||||||
logger.info("--> creating repository test-repo-2 with standard timeout - should ack");
|
logger.info("--> creating repository test-repo-2 with standard timeout - should ack");
|
||||||
putRepositoryResponse = run(client().admin().cluster().preparePutRepository("test-repo-2")
|
putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo-2")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||||
.put("location", newTempDir(LifecycleScope.SUITE))
|
.put("location", newTempDir(LifecycleScope.SUITE))
|
||||||
.put("compress", randomBoolean())
|
.put("compress", randomBoolean())
|
||||||
.put("chunk_size", randomIntBetween(5, 100))
|
.put("chunk_size", randomIntBetween(5, 100))
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> deleting repository test-repo-2 with 0s timeout - shouldn't ack");
|
logger.info("--> deleting repository test-repo-2 with 0s timeout - shouldn't ack");
|
||||||
DeleteRepositoryResponse deleteRepositoryResponse = run(client().admin().cluster().prepareDeleteRepository("test-repo-2")
|
DeleteRepositoryResponse deleteRepositoryResponse = client().admin().cluster().prepareDeleteRepository("test-repo-2")
|
||||||
.setTimeout("0s"));
|
.setTimeout("0s").get();
|
||||||
assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(false));
|
assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(false));
|
||||||
|
|
||||||
logger.info("--> deleting repository test-repo-1 with standard timeout - should ack");
|
logger.info("--> deleting repository test-repo-1 with standard timeout - should ack");
|
||||||
deleteRepositoryResponse = run(client().admin().cluster().prepareDeleteRepository("test-repo-1"));
|
deleteRepositoryResponse = client().admin().cluster().prepareDeleteRepository("test-repo-1").get();
|
||||||
assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -54,10 +54,10 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Settings getSettings() {
|
public Settings indexSettings() {
|
||||||
// During restore we frequently restore index to exactly the same state it was before, that might cause the same
|
// During restore we frequently restore index to exactly the same state it was before, that might cause the same
|
||||||
// checksum file to be written twice during restore operation
|
// checksum file to be written twice during restore operation
|
||||||
return ImmutableSettings.builder().put(super.getSettings())
|
return ImmutableSettings.builder().put(super.indexSettings())
|
||||||
.put(MockDirectoryHelper.RANDOM_PREVENT_DOUBLE_WRITE, false)
|
.put(MockDirectoryHelper.RANDOM_PREVENT_DOUBLE_WRITE, false)
|
||||||
.put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false) //TODO: Ask Simon if this is hiding an issue
|
.put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false) //TODO: Ask Simon if this is hiding an issue
|
||||||
.build();
|
.build();
|
||||||
|
@ -68,12 +68,12 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
Client client = client();
|
Client client = client();
|
||||||
|
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||||
.put("location", newTempDir(LifecycleScope.SUITE))
|
.put("location", newTempDir(LifecycleScope.SUITE))
|
||||||
.put("compress", randomBoolean())
|
.put("compress", randomBoolean())
|
||||||
.put("chunk_size", randomIntBetween(5, 100))
|
.put("chunk_size", randomIntBetween(5, 100))
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
createIndex("test-idx-1", "test-idx-2", "test-idx-3");
|
createIndex("test-idx-1", "test-idx-2", "test-idx-3");
|
||||||
|
@ -86,43 +86,43 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
|
index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
assertThat(run(client.prepareCount("test-idx-1")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
|
||||||
assertThat(run(client.prepareCount("test-idx-2")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
|
||||||
assertThat(run(client.prepareCount("test-idx-3")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3"));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
||||||
|
|
||||||
assertThat(run(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap")).getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
||||||
|
|
||||||
logger.info("--> delete some data");
|
logger.info("--> delete some data");
|
||||||
for (int i = 0; i < 50; i++) {
|
for (int i = 0; i < 50; i++) {
|
||||||
run(client.prepareDelete("test-idx-1", "doc", Integer.toString(i)));
|
client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
|
||||||
}
|
}
|
||||||
for (int i = 50; i < 100; i++) {
|
for (int i = 50; i < 100; i++) {
|
||||||
run(client.prepareDelete("test-idx-2", "doc", Integer.toString(i)));
|
client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
|
||||||
}
|
}
|
||||||
for (int i = 0; i < 100; i += 2) {
|
for (int i = 0; i < 100; i += 2) {
|
||||||
run(client.prepareDelete("test-idx-3", "doc", Integer.toString(i)));
|
client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
assertThat(run(client.prepareCount("test-idx-1")).getCount(), equalTo(50L));
|
assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L));
|
||||||
assertThat(run(client.prepareCount("test-idx-2")).getCount(), equalTo(50L));
|
assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(50L));
|
||||||
assertThat(run(client.prepareCount("test-idx-3")).getCount(), equalTo(50L));
|
assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));
|
||||||
|
|
||||||
logger.info("--> close indices");
|
logger.info("--> close indices");
|
||||||
run(client.admin().indices().prepareClose("test-idx-1", "test-idx-2"));
|
client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();
|
||||||
|
|
||||||
logger.info("--> restore all indices from the snapshot");
|
logger.info("--> restore all indices from the snapshot");
|
||||||
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
|
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
||||||
|
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
assertThat(run(client.prepareCount("test-idx-1")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
|
||||||
assertThat(run(client.prepareCount("test-idx-2")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
|
||||||
assertThat(run(client.prepareCount("test-idx-3")).getCount(), equalTo(50L));
|
assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));
|
||||||
|
|
||||||
// Test restore after index deletion
|
// Test restore after index deletion
|
||||||
logger.info("--> delete indices");
|
logger.info("--> delete indices");
|
||||||
|
@ -131,8 +131,8 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
|
restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
assertThat(run(client.prepareCount("test-idx-1")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
|
||||||
ClusterState clusterState = run(client.admin().cluster().prepareState()).getState();
|
ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
|
||||||
assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
|
assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
|
||||||
assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
|
assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
|
||||||
}
|
}
|
||||||
|
@ -142,16 +142,16 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
Client client = client();
|
Client client = client();
|
||||||
|
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())));
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
|
||||||
|
|
||||||
assertThat(run(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap")).getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -159,22 +159,22 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
Client client = client();
|
Client client = client();
|
||||||
|
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())));
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> creating test template");
|
logger.info("--> creating test template");
|
||||||
assertThat(run(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}")).isAcknowledged(), equalTo(true));
|
assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}").get().isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices().setWaitForCompletion(true));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices().setWaitForCompletion(true).get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
|
||||||
assertThat(run(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap")).getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
||||||
|
|
||||||
logger.info("--> delete test template");
|
logger.info("--> delete test template");
|
||||||
assertThat(run(client.admin().indices().prepareDeleteTemplate("test-template")).isAcknowledged(), equalTo(true));
|
assertThat(client.admin().indices().prepareDeleteTemplate("test-template").get().isAcknowledged(), equalTo(true));
|
||||||
ClusterStateResponse clusterStateResponse = run(client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices());
|
ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices().get();
|
||||||
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
|
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
|
||||||
|
|
||||||
logger.info("--> restore cluster state");
|
logger.info("--> restore cluster state");
|
||||||
|
@ -183,7 +183,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
|
||||||
|
|
||||||
logger.info("--> check that template is restored");
|
logger.info("--> check that template is restored");
|
||||||
clusterStateResponse = run(client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices());
|
clusterStateResponse = client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices().get();
|
||||||
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(true));
|
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(true));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -193,28 +193,28 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
|
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
File location = newTempDir();
|
File location = newTempDir();
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", location)));
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", location)).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> creating test template");
|
logger.info("--> creating test template");
|
||||||
assertThat(run(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}")).isAcknowledged(), equalTo(true));
|
assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}").get().isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> snapshot without global state");
|
logger.info("--> snapshot without global state");
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state").setIndices().setIncludeGlobalState(false).setWaitForCompletion(true));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state").setIndices().setIncludeGlobalState(false).setWaitForCompletion(true).get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
|
||||||
assertThat(run(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state")).getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
||||||
|
|
||||||
logger.info("--> snapshot with global state");
|
logger.info("--> snapshot with global state");
|
||||||
createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-with-global-state").setIndices().setIncludeGlobalState(true).setWaitForCompletion(true));
|
createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-with-global-state").setIndices().setIncludeGlobalState(true).setWaitForCompletion(true).get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
|
||||||
assertThat(run(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-with-global-state")).getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-with-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
||||||
|
|
||||||
logger.info("--> delete test template");
|
logger.info("--> delete test template");
|
||||||
wipeTemplates("test-template");
|
wipeTemplates("test-template");
|
||||||
ClusterStateResponse clusterStateResponse = run(client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices());
|
ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices().get();
|
||||||
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
|
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
|
||||||
|
|
||||||
logger.info("--> try restoring cluster state from snapshot without global state");
|
logger.info("--> try restoring cluster state from snapshot without global state");
|
||||||
|
@ -222,7 +222,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
|
||||||
|
|
||||||
logger.info("--> check that template wasn't restored");
|
logger.info("--> check that template wasn't restored");
|
||||||
clusterStateResponse = run(client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices());
|
clusterStateResponse = client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices().get();
|
||||||
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
|
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
|
||||||
|
|
||||||
logger.info("--> restore cluster state");
|
logger.info("--> restore cluster state");
|
||||||
|
@ -230,7 +230,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
|
||||||
|
|
||||||
logger.info("--> check that template is restored");
|
logger.info("--> check that template is restored");
|
||||||
clusterStateResponse = run(client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices());
|
clusterStateResponse = client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices().get();
|
||||||
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(true));
|
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(true));
|
||||||
|
|
||||||
createIndex("test-idx");
|
createIndex("test-idx");
|
||||||
|
@ -241,18 +241,18 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> snapshot without global state but with indices");
|
logger.info("--> snapshot without global state but with indices");
|
||||||
createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state-with-index").setIndices("test-idx").setIncludeGlobalState(false).setWaitForCompletion(true));
|
createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state-with-index").setIndices("test-idx").setIncludeGlobalState(false).setWaitForCompletion(true).get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), greaterThan(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), greaterThan(0));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
||||||
assertThat(run(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state-with-index")).getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state-with-index").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
||||||
|
|
||||||
logger.info("--> delete test template and index ");
|
logger.info("--> delete test template and index ");
|
||||||
wipeIndex("test-idx");
|
wipeIndices("test-idx");
|
||||||
wipeTemplates("test-template");
|
wipeTemplates("test-template");
|
||||||
clusterStateResponse = run(client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices());
|
clusterStateResponse = client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices().get();
|
||||||
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
|
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
|
||||||
|
|
||||||
logger.info("--> try restoring index and cluster state from snapshot without global state");
|
logger.info("--> try restoring index and cluster state from snapshot without global state");
|
||||||
|
@ -262,9 +262,9 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
|
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
logger.info("--> check that template wasn't restored but index was");
|
logger.info("--> check that template wasn't restored but index was");
|
||||||
clusterStateResponse = run(client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices());
|
clusterStateResponse = client.admin().cluster().prepareState().setFilterRoutingTable(true).setFilterNodes(true).setFilterIndexTemplates("test-template").setFilterIndices().get();
|
||||||
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
|
assertThat(clusterStateResponse.getState().getMetaData().templates().containsKey("test-template"), equalTo(false));
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -274,13 +274,13 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
Client client = client();
|
Client client = client();
|
||||||
|
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||||
ImmutableSettings.settingsBuilder()
|
ImmutableSettings.settingsBuilder()
|
||||||
.put("location", newTempDir(LifecycleScope.TEST))
|
.put("location", newTempDir(LifecycleScope.TEST))
|
||||||
.put("random", randomAsciiOfLength(10))
|
.put("random", randomAsciiOfLength(10))
|
||||||
.put("random_control_io_exception_rate", 0.2)
|
.put("random_control_io_exception_rate", 0.2)
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
createIndex("test-idx");
|
createIndex("test-idx");
|
||||||
|
@ -291,11 +291,11 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
try {
|
try {
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx"));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
|
||||||
if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) {
|
if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) {
|
||||||
// If we are here, that means we didn't have any failures, let's check it
|
// If we are here, that means we didn't have any failures, let's check it
|
||||||
assertThat(getFailureCount("test-repo"), equalTo(0L));
|
assertThat(getFailureCount("test-repo"), equalTo(0L));
|
||||||
|
@ -307,7 +307,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(shardFailure.nodeId(), notNullValue());
|
assertThat(shardFailure.nodeId(), notNullValue());
|
||||||
assertThat(shardFailure.index(), equalTo("test-idx"));
|
assertThat(shardFailure.index(), equalTo("test-idx"));
|
||||||
}
|
}
|
||||||
GetSnapshotsResponse getSnapshotsResponse = run(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap"));
|
GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get();
|
||||||
assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
|
assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
|
||||||
SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
|
SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
|
||||||
if (snapshotInfo.state() == SnapshotState.SUCCESS) {
|
if (snapshotInfo.state() == SnapshotState.SUCCESS) {
|
||||||
|
@ -326,13 +326,13 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
public void dataFileFailureDuringSnapshotTest() throws Exception {
|
public void dataFileFailureDuringSnapshotTest() throws Exception {
|
||||||
Client client = client();
|
Client client = client();
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||||
ImmutableSettings.settingsBuilder()
|
ImmutableSettings.settingsBuilder()
|
||||||
.put("location", newTempDir(LifecycleScope.TEST))
|
.put("location", newTempDir(LifecycleScope.TEST))
|
||||||
.put("random", randomAsciiOfLength(10))
|
.put("random", randomAsciiOfLength(10))
|
||||||
.put("random_data_file_io_exception_rate", 0.1)
|
.put("random_data_file_io_exception_rate", 0.1)
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
createIndex("test-idx");
|
createIndex("test-idx");
|
||||||
|
@ -343,10 +343,10 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx"));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
|
||||||
if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) {
|
if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) {
|
||||||
// If we are here, that means we didn't have any failures, let's check it
|
// If we are here, that means we didn't have any failures, let's check it
|
||||||
assertThat(getFailureCount("test-repo"), equalTo(0L));
|
assertThat(getFailureCount("test-repo"), equalTo(0L));
|
||||||
|
@ -357,7 +357,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(shardFailure.nodeId(), notNullValue());
|
assertThat(shardFailure.nodeId(), notNullValue());
|
||||||
assertThat(shardFailure.index(), equalTo("test-idx"));
|
assertThat(shardFailure.index(), equalTo("test-idx"));
|
||||||
}
|
}
|
||||||
GetSnapshotsResponse getSnapshotsResponse = run(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap"));
|
GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get();
|
||||||
assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
|
assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
|
||||||
SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
|
SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
|
||||||
assertThat(snapshotInfo.shardFailures().size(), greaterThan(0));
|
assertThat(snapshotInfo.shardFailures().size(), greaterThan(0));
|
||||||
|
@ -372,8 +372,8 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
File repositoryLocation = newTempDir(LifecycleScope.TEST);
|
File repositoryLocation = newTempDir(LifecycleScope.TEST);
|
||||||
Client client = client();
|
Client client = client();
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
createIndex("test-idx");
|
createIndex("test-idx");
|
||||||
|
@ -384,31 +384,31 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx"));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
|
assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards()));
|
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards()));
|
||||||
|
|
||||||
logger.info("--> update repository with mock version");
|
logger.info("--> update repository with mock version");
|
||||||
putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||||
ImmutableSettings.settingsBuilder()
|
ImmutableSettings.settingsBuilder()
|
||||||
.put("location", repositoryLocation)
|
.put("location", repositoryLocation)
|
||||||
.put("random", randomAsciiOfLength(10))
|
.put("random", randomAsciiOfLength(10))
|
||||||
.put("random_data_file_io_exception_rate", 0.3)
|
.put("random_data_file_io_exception_rate", 0.3)
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
// Test restore after index deletion
|
// Test restore after index deletion
|
||||||
logger.info("--> delete index");
|
logger.info("--> delete index");
|
||||||
wipeIndex("test-idx");
|
wipeIndices("test-idx");
|
||||||
logger.info("--> restore index after deletion");
|
logger.info("--> restore index after deletion");
|
||||||
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
|
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
CountResponse countResponse = run(client.prepareCount("test-idx"));
|
CountResponse countResponse = client.prepareCount("test-idx").get();
|
||||||
assertThat(countResponse.getCount(), equalTo(100L));
|
assertThat(countResponse.getCount(), equalTo(100L));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -420,8 +420,8 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
File repositoryLocation = newTempDir(LifecycleScope.TEST);
|
File repositoryLocation = newTempDir(LifecycleScope.TEST);
|
||||||
Client client = client();
|
Client client = client();
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
createIndex("test-idx");
|
createIndex("test-idx");
|
||||||
|
@ -432,26 +432,26 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx"));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
|
assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards()));
|
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards()));
|
||||||
|
|
||||||
logger.info("--> update repository with mock version");
|
logger.info("--> update repository with mock version");
|
||||||
putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||||
ImmutableSettings.settingsBuilder()
|
ImmutableSettings.settingsBuilder()
|
||||||
.put("location", repositoryLocation)
|
.put("location", repositoryLocation)
|
||||||
.put("random", randomAsciiOfLength(10))
|
.put("random", randomAsciiOfLength(10))
|
||||||
.put("random_data_file_io_exception_rate", 1.0) // Fail completely
|
.put("random_data_file_io_exception_rate", 1.0) // Fail completely
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
// Test restore after index deletion
|
// Test restore after index deletion
|
||||||
logger.info("--> delete index");
|
logger.info("--> delete index");
|
||||||
wipeIndex("test-idx");
|
wipeIndices("test-idx");
|
||||||
logger.info("--> restore index after deletion");
|
logger.info("--> restore index after deletion");
|
||||||
ListenableActionFuture<RestoreSnapshotResponse> restoreSnapshotResponseFuture =
|
ListenableActionFuture<RestoreSnapshotResponse> restoreSnapshotResponseFuture =
|
||||||
client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute();
|
client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute();
|
||||||
|
@ -461,7 +461,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(waitForIndex("test-idx", TimeValue.timeValueSeconds(10)), equalTo(true));
|
assertThat(waitForIndex("test-idx", TimeValue.timeValueSeconds(10)), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> delete index");
|
logger.info("--> delete index");
|
||||||
wipeIndex("test-idx");
|
wipeIndices("test-idx");
|
||||||
logger.info("--> get restore results");
|
logger.info("--> get restore results");
|
||||||
// Now read restore results and make sure it failed
|
// Now read restore results and make sure it failed
|
||||||
RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotResponseFuture.actionGet(TimeValue.timeValueSeconds(10));
|
RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotResponseFuture.actionGet(TimeValue.timeValueSeconds(10));
|
||||||
|
@ -469,8 +469,8 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(restoreSnapshotResponse.getRestoreInfo().failedShards()));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(restoreSnapshotResponse.getRestoreInfo().failedShards()));
|
||||||
|
|
||||||
logger.info("--> restoring working repository");
|
logger.info("--> restoring working repository");
|
||||||
putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)));
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> trying to restore index again");
|
logger.info("--> trying to restore index again");
|
||||||
|
@ -478,7 +478,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
CountResponse countResponse = run(client.prepareCount("test-idx"));
|
CountResponse countResponse = client.prepareCount("test-idx").get();
|
||||||
assertThat(countResponse.getCount(), equalTo(100L));
|
assertThat(countResponse.getCount(), equalTo(100L));
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -488,17 +488,17 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
Client client = client();
|
Client client = client();
|
||||||
|
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||||
.put("location", newTempDir(LifecycleScope.SUITE))
|
.put("location", newTempDir(LifecycleScope.SUITE))
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> creating index that cannot be allocated");
|
logger.info("--> creating index that cannot be allocated");
|
||||||
run(prepareCreate("test-idx", 2, ImmutableSettings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + ".tag", "nowhere").put("index.number_of_shards", 3)));
|
prepareCreate("test-idx", 2, ImmutableSettings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + ".tag", "nowhere").put("index.number_of_shards", 3)).get();
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx"));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(3));
|
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(3));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), equalTo(3));
|
assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), equalTo(3));
|
||||||
|
@ -512,10 +512,10 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
|
|
||||||
File repo = newTempDir(LifecycleScope.SUITE);
|
File repo = newTempDir(LifecycleScope.SUITE);
|
||||||
logger.info("--> creating repository at " + repo.getAbsolutePath());
|
logger.info("--> creating repository at " + repo.getAbsolutePath());
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||||
.put("location", repo).put("compress", false).put("chunk_size", atLeast(5))
|
.put("location", repo).put("compress", false).put("chunk_size", atLeast(5))
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
createIndex("test-idx");
|
createIndex("test-idx");
|
||||||
|
@ -529,18 +529,18 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
logger.info("--> snapshot {}", i);
|
logger.info("--> snapshot {}", i);
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-" + i).setWaitForCompletion(true).setIndices("test-idx"));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-" + i).setWaitForCompletion(true).setIndices("test-idx").get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
||||||
// Store number of files after each snapshot
|
// Store number of files after each snapshot
|
||||||
numberOfFiles[i] = numberOfFiles(repo);
|
numberOfFiles[i] = numberOfFiles(repo);
|
||||||
}
|
}
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(10L * numberOfSnapshots));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(10L * numberOfSnapshots));
|
||||||
int numberOfFilesBeforeDeletion = numberOfFiles(repo);
|
int numberOfFilesBeforeDeletion = numberOfFiles(repo);
|
||||||
|
|
||||||
logger.info("--> delete all snapshots except the first one and last one");
|
logger.info("--> delete all snapshots except the first one and last one");
|
||||||
for (int i = 1; i < numberOfSnapshots - 1; i++) {
|
for (int i = 1; i < numberOfSnapshots - 1; i++) {
|
||||||
run(client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-" + i));
|
client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-" + i).get();
|
||||||
}
|
}
|
||||||
|
|
||||||
int numberOfFilesAfterDeletion = numberOfFiles(repo);
|
int numberOfFilesAfterDeletion = numberOfFiles(repo);
|
||||||
|
@ -548,7 +548,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(numberOfFilesAfterDeletion, lessThan(numberOfFilesBeforeDeletion));
|
assertThat(numberOfFilesAfterDeletion, lessThan(numberOfFilesBeforeDeletion));
|
||||||
|
|
||||||
logger.info("--> delete index");
|
logger.info("--> delete index");
|
||||||
wipeIndex("test-idx");
|
wipeIndices("test-idx");
|
||||||
|
|
||||||
logger.info("--> restore index");
|
logger.info("--> restore index");
|
||||||
String lastSnapshot = "test-snap-" + (numberOfSnapshots - 1);
|
String lastSnapshot = "test-snap-" + (numberOfSnapshots - 1);
|
||||||
|
@ -556,10 +556,10 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
||||||
|
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(10L * numberOfSnapshots));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(10L * numberOfSnapshots));
|
||||||
|
|
||||||
logger.info("--> delete the last snapshot");
|
logger.info("--> delete the last snapshot");
|
||||||
run(client.admin().cluster().prepareDeleteSnapshot("test-repo", lastSnapshot));
|
client.admin().cluster().prepareDeleteSnapshot("test-repo", lastSnapshot).get();
|
||||||
logger.info("--> make sure that number of files is back to what it was when the first snapshot was made");
|
logger.info("--> make sure that number of files is back to what it was when the first snapshot was made");
|
||||||
assertThat(numberOfFiles(repo), equalTo(numberOfFiles[0]));
|
assertThat(numberOfFiles(repo), equalTo(numberOfFiles[0]));
|
||||||
}
|
}
|
||||||
|
@ -570,27 +570,27 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
Client client = client();
|
Client client = client();
|
||||||
|
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||||
.put("location", newTempDir(LifecycleScope.SUITE))
|
.put("location", newTempDir(LifecycleScope.SUITE))
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
createIndex("test-idx", "test-idx-closed");
|
createIndex("test-idx", "test-idx-closed");
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
logger.info("--> closing index test-idx-closed");
|
logger.info("--> closing index test-idx-closed");
|
||||||
run(client.admin().indices().prepareClose("test-idx-closed"));
|
client.admin().indices().prepareClose("test-idx-closed").get();
|
||||||
ClusterStateResponse stateResponse = run(client.admin().cluster().prepareState());
|
ClusterStateResponse stateResponse = client.admin().cluster().prepareState().get();
|
||||||
assertThat(stateResponse.getState().metaData().index("test-idx-closed").state(), equalTo(IndexMetaData.State.CLOSE));
|
assertThat(stateResponse.getState().metaData().index("test-idx-closed").state(), equalTo(IndexMetaData.State.CLOSE));
|
||||||
assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), nullValue());
|
assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), nullValue());
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx*"));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx*").get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().indices().size(), equalTo(1));
|
assertThat(createSnapshotResponse.getSnapshotInfo().indices().size(), equalTo(1));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), equalTo(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), equalTo(0));
|
||||||
|
|
||||||
logger.info("--> deleting snapshot");
|
logger.info("--> deleting snapshot");
|
||||||
run(client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap"));
|
client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -598,10 +598,10 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
Client client = client();
|
Client client = client();
|
||||||
|
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||||
.put("location", newTempDir(LifecycleScope.SUITE))
|
.put("location", newTempDir(LifecycleScope.SUITE))
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
createIndex("test-idx-1", "test-idx-2");
|
createIndex("test-idx-1", "test-idx-2");
|
||||||
|
@ -613,11 +613,11 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
index("test-idx-2", "doc", Integer.toString(i), "foo", "bar" + i);
|
index("test-idx-2", "doc", Integer.toString(i), "foo", "bar" + i);
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
assertThat(run(client.prepareCount("test-idx-1")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
|
||||||
assertThat(run(client.prepareCount("test-idx-2")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-1", "test-idx-2"));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-1", "test-idx-2").get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
||||||
|
|
||||||
|
@ -627,11 +627,11 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
||||||
|
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
assertThat(run(client.prepareCount("test-idx-1-copy")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx-1-copy").get().getCount(), equalTo(100L));
|
||||||
assertThat(run(client.prepareCount("test-idx-2-copy")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx-2-copy").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> close indices");
|
logger.info("--> close indices");
|
||||||
run(client.admin().indices().prepareClose("test-idx-1", "test-idx-2-copy"));
|
client.admin().indices().prepareClose("test-idx-1", "test-idx-2-copy").get();
|
||||||
|
|
||||||
logger.info("--> restore indices with different names");
|
logger.info("--> restore indices with different names");
|
||||||
restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
|
restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
|
||||||
|
@ -670,14 +670,14 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
Client client = client();
|
Client client = client();
|
||||||
File repositoryLocation = newTempDir(LifecycleScope.TEST);
|
File repositoryLocation = newTempDir(LifecycleScope.TEST);
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
.setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
|
||||||
ImmutableSettings.settingsBuilder()
|
ImmutableSettings.settingsBuilder()
|
||||||
.put("location", repositoryLocation)
|
.put("location", repositoryLocation)
|
||||||
.put("random", randomAsciiOfLength(10))
|
.put("random", randomAsciiOfLength(10))
|
||||||
.put("random_data_file_blocking_rate", 0.1)
|
.put("random_data_file_blocking_rate", 0.1)
|
||||||
.put("wait_after_unblock", 200)
|
.put("wait_after_unblock", 200)
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
// Create index on 2 nodes and make sure each node has a primary by setting no replicas
|
// Create index on 2 nodes and make sure each node has a primary by setting no replicas
|
||||||
|
@ -688,15 +688,15 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx"));
|
client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
|
||||||
String blockedNode = waitForCompletionOrBlock(cluster().nodesInclude("test-idx"), "test-repo", "test-snap", TimeValue.timeValueSeconds(60));
|
String blockedNode = waitForCompletionOrBlock(cluster().nodesInclude("test-idx"), "test-repo", "test-snap", TimeValue.timeValueSeconds(60));
|
||||||
if (blockedNode != null) {
|
if (blockedNode != null) {
|
||||||
logger.info("--> move shards away from the node");
|
logger.info("--> move shards away from the node");
|
||||||
ImmutableSettings.Builder excludeSettings = ImmutableSettings.builder().put("index.routing.allocation.exclude._name", blockedNode);
|
ImmutableSettings.Builder excludeSettings = ImmutableSettings.builder().put("index.routing.allocation.exclude._name", blockedNode);
|
||||||
run(client().admin().indices().prepareUpdateSettings("test-idx").setSettings(excludeSettings));
|
client().admin().indices().prepareUpdateSettings("test-idx").setSettings(excludeSettings).get();
|
||||||
logger.info("--> execution was blocked on node [{}], moving shards away from this node", blockedNode);
|
logger.info("--> execution was blocked on node [{}], moving shards away from this node", blockedNode);
|
||||||
unblock("test-repo");
|
unblock("test-repo");
|
||||||
logger.info("--> waiting for completion");
|
logger.info("--> waiting for completion");
|
||||||
|
@ -706,18 +706,18 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
} else {
|
} else {
|
||||||
logger.info("--> done without blocks");
|
logger.info("--> done without blocks");
|
||||||
}
|
}
|
||||||
ImmutableList<SnapshotInfo> snapshotInfos = run(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap")).getSnapshots();
|
ImmutableList<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots();
|
||||||
assertThat(snapshotInfos.size(), equalTo(1));
|
assertThat(snapshotInfos.size(), equalTo(1));
|
||||||
assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
|
assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
|
||||||
assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
|
assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
|
||||||
|
|
||||||
logger.info("--> delete index");
|
logger.info("--> delete index");
|
||||||
wipeIndex("test-idx");
|
wipeIndices("test-idx");
|
||||||
|
|
||||||
logger.info("--> replace mock repository with real one at the same location");
|
logger.info("--> replace mock repository with real one at the same location");
|
||||||
putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", repositoryLocation)
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
logger.info("--> restore index");
|
logger.info("--> restore index");
|
||||||
|
@ -725,7 +725,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
||||||
|
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -734,12 +734,12 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
|
|
||||||
logger.info("--> creating repository");
|
logger.info("--> creating repository");
|
||||||
File repositoryLocation = newTempDir(LifecycleScope.SUITE);
|
File repositoryLocation = newTempDir(LifecycleScope.SUITE);
|
||||||
PutRepositoryResponse putRepositoryResponse = run(client.admin().cluster().preparePutRepository("test-repo")
|
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||||
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
|
||||||
.put("location", repositoryLocation)
|
.put("location", repositoryLocation)
|
||||||
.put("compress", randomBoolean())
|
.put("compress", randomBoolean())
|
||||||
.put("chunk_size", randomIntBetween(5, 100))
|
.put("chunk_size", randomIntBetween(5, 100))
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
|
|
||||||
createIndex("test-idx");
|
createIndex("test-idx");
|
||||||
|
@ -750,14 +750,14 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
|
||||||
}
|
}
|
||||||
refresh();
|
refresh();
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> snapshot");
|
logger.info("--> snapshot");
|
||||||
CreateSnapshotResponse createSnapshotResponse = run(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx"));
|
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
|
||||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
||||||
|
|
||||||
assertThat(run(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap")).getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
||||||
|
|
||||||
logger.info("--> delete index");
|
logger.info("--> delete index");
|
||||||
wipeIndices("test-idx");
|
wipeIndices("test-idx");
|
||||||
|
@ -766,19 +766,19 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
|
||||||
wipeRepositories("test-repo");
|
wipeRepositories("test-repo");
|
||||||
|
|
||||||
logger.info("--> create read-only URL repository");
|
logger.info("--> create read-only URL repository");
|
||||||
putRepositoryResponse = run(client.admin().cluster().preparePutRepository("url-repo")
|
putRepositoryResponse = client.admin().cluster().preparePutRepository("url-repo")
|
||||||
.setType("url").setSettings(ImmutableSettings.settingsBuilder()
|
.setType("url").setSettings(ImmutableSettings.settingsBuilder()
|
||||||
.put("url", repositoryLocation.toURI().toURL())
|
.put("url", repositoryLocation.toURI().toURL())
|
||||||
));
|
).get();
|
||||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||||
logger.info("--> restore index after deletion");
|
logger.info("--> restore index after deletion");
|
||||||
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("url-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").execute().actionGet();
|
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("url-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").execute().actionGet();
|
||||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
assertThat(run(client.prepareCount("test-idx")).getCount(), equalTo(100L));
|
assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
|
||||||
|
|
||||||
logger.info("--> list available shapshots");
|
logger.info("--> list available shapshots");
|
||||||
GetSnapshotsResponse getSnapshotsResponse = run(client.admin().cluster().prepareGetSnapshots("url-repo"));
|
GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get();
|
||||||
assertThat(getSnapshotsResponse.getSnapshots(), notNullValue());
|
assertThat(getSnapshotsResponse.getSnapshots(), notNullValue());
|
||||||
assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
|
assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
|
||||||
}
|
}
|
||||||
|
|
|
@ -183,7 +183,7 @@ public abstract class AbstractTermVectorTests extends ElasticsearchIntegrationTe
|
||||||
}
|
}
|
||||||
|
|
||||||
protected void createIndexBasedOnFieldSettings(TestFieldSetting[] fieldSettings, int number_of_shards) throws IOException {
|
protected void createIndexBasedOnFieldSettings(TestFieldSetting[] fieldSettings, int number_of_shards) throws IOException {
|
||||||
wipeIndex("test");
|
wipeIndices("test");
|
||||||
XContentBuilder mappingBuilder = jsonBuilder();
|
XContentBuilder mappingBuilder = jsonBuilder();
|
||||||
mappingBuilder.startObject().startObject("type1").startObject("properties");
|
mappingBuilder.startObject().startObject("type1").startObject("properties");
|
||||||
for (TestFieldSetting field : fieldSettings) {
|
for (TestFieldSetting field : fieldSettings) {
|
||||||
|
@ -196,7 +196,7 @@ public abstract class AbstractTermVectorTests extends ElasticsearchIntegrationTe
|
||||||
settings.put("number_of_shards", number_of_shards);
|
settings.put("number_of_shards", number_of_shards);
|
||||||
}
|
}
|
||||||
mappingBuilder.endObject().endObject().endObject();
|
mappingBuilder.endObject().endObject().endObject();
|
||||||
run(prepareCreate("test").addMapping("type1", mappingBuilder).setSettings(settings));
|
prepareCreate("test").addMapping("type1", mappingBuilder).setSettings(settings).get();
|
||||||
|
|
||||||
ensureYellow();
|
ensureYellow();
|
||||||
}
|
}
|
||||||
|
|
|
@ -372,7 +372,7 @@ public class GetTermVectorTests extends AbstractTermVectorTests {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
TermVectorResponse response = run(request);
|
TermVectorResponse response = request.get();
|
||||||
Fields luceneTermVectors = getTermVectorsFromLucene(directoryReader, test.doc);
|
Fields luceneTermVectors = getTermVectorsFromLucene(directoryReader, test.doc);
|
||||||
validateResponse(response, luceneTermVectors, test);
|
validateResponse(response, luceneTermVectors, test);
|
||||||
} catch (Throwable t) {
|
} catch (Throwable t) {
|
||||||
|
|
|
@ -41,7 +41,7 @@ public class MultiTermVectorsTests extends AbstractTermVectorTests {
|
||||||
requestBuilder.add(getRequestForConfig(test).request());
|
requestBuilder.add(getRequestForConfig(test).request());
|
||||||
}
|
}
|
||||||
|
|
||||||
MultiTermVectorsItemResponse[] responseItems = run(requestBuilder).getResponses();
|
MultiTermVectorsItemResponse[] responseItems = requestBuilder.get().getResponses();
|
||||||
|
|
||||||
for (int i = 0; i < testConfigs.length; i++) {
|
for (int i = 0; i < testConfigs.length; i++) {
|
||||||
TestConfig test = testConfigs[i];
|
TestConfig test = testConfigs[i];
|
||||||
|
|
|
@ -20,13 +20,9 @@ package org.elasticsearch.test;
|
||||||
|
|
||||||
import com.carrotsearch.randomizedtesting.SeedUtils;
|
import com.carrotsearch.randomizedtesting.SeedUtils;
|
||||||
import com.google.common.base.Joiner;
|
import com.google.common.base.Joiner;
|
||||||
import com.google.common.collect.Iterators;
|
|
||||||
import org.apache.lucene.util.AbstractRandomizedTest.IntegrationTests;
|
import org.apache.lucene.util.AbstractRandomizedTest.IntegrationTests;
|
||||||
import org.apache.lucene.util.IOUtils;
|
|
||||||
import org.elasticsearch.ExceptionsHelper;
|
import org.elasticsearch.ExceptionsHelper;
|
||||||
import org.elasticsearch.action.ActionListener;
|
import org.elasticsearch.action.ActionListener;
|
||||||
import org.elasticsearch.action.ActionRequestBuilder;
|
|
||||||
import org.elasticsearch.action.ActionResponse;
|
|
||||||
import org.elasticsearch.action.ShardOperationFailedException;
|
import org.elasticsearch.action.ShardOperationFailedException;
|
||||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
|
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
|
||||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||||
|
@ -43,17 +39,11 @@ import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||||
import org.elasticsearch.action.index.IndexResponse;
|
import org.elasticsearch.action.index.IndexResponse;
|
||||||
import org.elasticsearch.action.search.ClearScrollResponse;
|
import org.elasticsearch.action.search.ClearScrollResponse;
|
||||||
import org.elasticsearch.action.support.IgnoreIndices;
|
import org.elasticsearch.action.support.IgnoreIndices;
|
||||||
import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
|
|
||||||
import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
|
|
||||||
import org.elasticsearch.client.AdminClient;
|
import org.elasticsearch.client.AdminClient;
|
||||||
import org.elasticsearch.client.Client;
|
import org.elasticsearch.client.Client;
|
||||||
import org.elasticsearch.client.Requests;
|
import org.elasticsearch.client.Requests;
|
||||||
import org.elasticsearch.cluster.ClusterService;
|
import org.elasticsearch.cluster.ClusterService;
|
||||||
import org.elasticsearch.cluster.ClusterState;
|
|
||||||
import org.elasticsearch.cluster.metadata.MetaData;
|
import org.elasticsearch.cluster.metadata.MetaData;
|
||||||
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
|
||||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
|
||||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
|
||||||
import org.elasticsearch.common.Priority;
|
import org.elasticsearch.common.Priority;
|
||||||
import org.elasticsearch.common.collect.Tuple;
|
import org.elasticsearch.common.collect.Tuple;
|
||||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
|
@ -61,7 +51,6 @@ import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||||
import org.elasticsearch.index.merge.policy.*;
|
import org.elasticsearch.index.merge.policy.*;
|
||||||
import org.elasticsearch.indices.IndexAlreadyExistsException;
|
|
||||||
import org.elasticsearch.indices.IndexMissingException;
|
import org.elasticsearch.indices.IndexMissingException;
|
||||||
import org.elasticsearch.indices.IndexTemplateMissingException;
|
import org.elasticsearch.indices.IndexTemplateMissingException;
|
||||||
import org.elasticsearch.rest.RestStatus;
|
import org.elasticsearch.rest.RestStatus;
|
||||||
|
@ -85,38 +74,98 @@ import static org.hamcrest.Matchers.emptyIterable;
|
||||||
import static org.hamcrest.Matchers.equalTo;
|
import static org.hamcrest.Matchers.equalTo;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This abstract base testcase reuses a cluster instance internally and might
|
* {@link ElasticsearchIntegrationTest} is an abstract base class to run integration
|
||||||
* start an abitrary number of nodes in the background. This class might in the
|
* tests against a JVM private Elasticsearch Cluster. The test class supports 3 different
|
||||||
* future add random configureation options to created indices etc. unless
|
* cluster scopes.
|
||||||
* unless they are explicitly defined by the test.
|
* <ul>
|
||||||
|
* <li>{@link Scope#GLOBAL} - uses a cluster shared across test suites. This cluster doesn't allow any modifications to
|
||||||
|
* the cluster settings and will fail if any persistent cluster settings are applied during tear down.</li>
|
||||||
|
* <li>{@link Scope#TEST} - uses a new cluster for each individual test method.</li>
|
||||||
|
* <li>{@link Scope#SUITE} - uses a cluster shared across all test method in the same suite</li>
|
||||||
|
* </ul>
|
||||||
* <p/>
|
* <p/>
|
||||||
|
* The most common test scope it {@link Scope#GLOBAL} which shares a cluster per JVM. This cluster is only set-up once
|
||||||
|
* and can be used as long as the tests work on a per index basis without changing any cluster wide settings or require
|
||||||
|
* any specific node configuration. This is the best performing option since it sets up the cluster only once.
|
||||||
|
* <p/>
|
||||||
|
* If the tests need specific node settings or change persistent and/or transient cluster settings either {@link Scope#TEST}
|
||||||
|
* or {@link Scope#SUITE} should be used. To configure a scope for the test cluster the {@link ClusterScope} annotation
|
||||||
|
* should be used, here is an example:
|
||||||
|
* <pre>
|
||||||
|
* @ClusterScope(scope=Scope.TEST)
|
||||||
|
* public class SomeIntegrationTest extends ElasticsearchIntegrationTest {
|
||||||
|
* @Test
|
||||||
|
* public void testMethod() {}
|
||||||
|
* }
|
||||||
|
* </pre>
|
||||||
|
*
|
||||||
|
* If no {@link ClusterScope} annotation is present on an integration test the default scope it {@link Scope#GLOBAL}
|
||||||
|
* <p/>
|
||||||
|
* A test cluster creates a set of nodes in the background before the test starts. The number of nodes in the cluster is
|
||||||
|
* determined at random and can change across tests. The minimum number of nodes in the shared global cluster is <code>2</code>.
|
||||||
|
* For other scopes the {@link ClusterScope} allows configuring the initial number of nodes that are created before
|
||||||
|
* the tests start.
|
||||||
|
*
|
||||||
|
* <pre>
|
||||||
|
* @ClusterScope(scope=Scope.SUITE, numNodes=3)
|
||||||
|
* public class SomeIntegrationTest extends ElasticsearchIntegrationTest {
|
||||||
|
* @Test
|
||||||
|
* public void testMethod() {}
|
||||||
|
* }
|
||||||
|
* </pre>
|
||||||
|
* <p/>
|
||||||
|
* Note, the {@link ElasticsearchIntegrationTest} uses randomized settings on a cluster and index level. For instance
|
||||||
|
* each test might use different directory implementation for each test or will return a random client to one of the
|
||||||
|
* nodes in the cluster for each call to {@link #client()}. Test failures might only be reproducible if the correct
|
||||||
|
* system properties are passed to the test execution environment.
|
||||||
|
*
|
||||||
* <p>
|
* <p>
|
||||||
* This test wipes all indices before a testcase is executed and uses
|
* This class supports the following system properties (passed with -Dkey=value to the application)
|
||||||
* elasticsearch features like allocation filters to ensure an index is
|
* <ul>
|
||||||
* allocated only on a certain number of nodes. The test doesn't expose explicit
|
* <li>-D{@value #TESTS_CLIENT_RATIO} - a double value in the interval [0..1] which defines the ration between node and transport clients used</li>
|
||||||
* information about the client or which client is returned, clients might be
|
* <li>-D{@value #TESTS_CLUSTER_SEED} - a random seed used to initialize the clusters random context.
|
||||||
* node clients or transport clients and the returned client might be rotated.
|
* <li>-D{@value #INDEX_SEED_SETTING} - a random seed used to initialize the index random context.
|
||||||
|
* </ul>
|
||||||
* </p>
|
* </p>
|
||||||
* <p/>
|
|
||||||
* Tests that need more explict control over the cluster or that need to change
|
|
||||||
* the cluster state aside of per-index settings should not use this class as a
|
|
||||||
* baseclass. If your test modifies the cluster state with persistent or
|
|
||||||
* transient settings the baseclass will raise and error.
|
|
||||||
*/
|
*/
|
||||||
@Ignore
|
@Ignore
|
||||||
@IntegrationTests
|
@IntegrationTests
|
||||||
public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase {
|
public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase {
|
||||||
|
|
||||||
public static final String INDEX_SEED_SETTING = "index.tests.seed";
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The random seed for the shared test cluster used in the current JVM.
|
||||||
|
*/
|
||||||
public static final long SHARED_CLUSTER_SEED = clusterSeed();
|
public static final long SHARED_CLUSTER_SEED = clusterSeed();
|
||||||
|
|
||||||
private static final double TRANSPORT_CLIENT_RATIO = transportClientRatio();
|
private static final TestCluster GLOBAL_CLUSTER = new TestCluster(SHARED_CLUSTER_SEED, TestCluster.clusterName("shared", ElasticsearchTestCase.CHILD_VM_ID, SHARED_CLUSTER_SEED));
|
||||||
|
|
||||||
private static final TestCluster globalCluster = new TestCluster(SHARED_CLUSTER_SEED, TestCluster.clusterName("shared", ElasticsearchTestCase.CHILD_VM_ID, SHARED_CLUSTER_SEED));
|
/**
|
||||||
|
* Key used to set the transport client ratio via the commandline -D{@value #TESTS_CLIENT_RATIO}
|
||||||
|
*/
|
||||||
|
public static final String TESTS_CLIENT_RATIO = "tests.client.ratio";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Key used to set the shared cluster random seed via the commandline -D{@value #TESTS_CLUSTER_SEED}
|
||||||
|
*/
|
||||||
|
public static final String TESTS_CLUSTER_SEED = "tests.cluster_seed";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Key used to retrieve the index random seed from the index settings on a running node.
|
||||||
|
* The value of this seed can be used to initialize a random context for a specific index.
|
||||||
|
* It's set once per test via a generic index template.
|
||||||
|
*/
|
||||||
|
public static final String INDEX_SEED_SETTING = "index.tests.seed";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The current cluster depending on the configured {@link Scope}.
|
||||||
|
* By default if no {@link ClusterScope} is configured this will hold a reference to the global cluster carried
|
||||||
|
* on across test suites.
|
||||||
|
*/
|
||||||
private static TestCluster currentCluster;
|
private static TestCluster currentCluster;
|
||||||
|
|
||||||
|
private static final double TRANSPORT_CLIENT_RATIO = transportClientRatio();
|
||||||
|
|
||||||
private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<Class<?>, TestCluster>();
|
private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<Class<?>, TestCluster>();
|
||||||
|
|
||||||
@Before
|
@Before
|
||||||
|
@ -125,7 +174,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
switch (currentClusterScope) {
|
switch (currentClusterScope) {
|
||||||
case GLOBAL:
|
case GLOBAL:
|
||||||
clearClusters();
|
clearClusters();
|
||||||
currentCluster = globalCluster;
|
currentCluster = GLOBAL_CLUSTER;
|
||||||
break;
|
break;
|
||||||
case SUITE:
|
case SUITE:
|
||||||
currentCluster = buildAndPutCluster(currentClusterScope, false);
|
currentCluster = buildAndPutCluster(currentClusterScope, false);
|
||||||
|
@ -158,7 +207,9 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
|
|
||||||
private void clearClusters() throws IOException {
|
private void clearClusters() throws IOException {
|
||||||
if (!clusters.isEmpty()) {
|
if (!clusters.isEmpty()) {
|
||||||
IOUtils.close(clusters.values());
|
for(TestCluster cluster : clusters.values()) {
|
||||||
|
cluster.close();
|
||||||
|
}
|
||||||
clusters.clear();
|
clusters.clear();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -202,8 +253,13 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return cluster().client();
|
return cluster().client();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a randomized index template. This template is used to pass in randomized settings on a
|
||||||
|
* per index basis.
|
||||||
|
*/
|
||||||
private static void randomIndexTemplate() {
|
private static void randomIndexTemplate() {
|
||||||
if (cluster().numNodes() > 0) {
|
// TODO move settings for random directory etc here into the index based randomized settings.
|
||||||
|
if (cluster().size() > 0) {
|
||||||
client().admin().indices().preparePutTemplate("random_index_template")
|
client().admin().indices().preparePutTemplate("random_index_template")
|
||||||
.setTemplate("*")
|
.setTemplate("*")
|
||||||
.setOrder(0)
|
.setOrder(0)
|
||||||
|
@ -239,18 +295,20 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return cluster();
|
return cluster();
|
||||||
}
|
}
|
||||||
|
|
||||||
public ImmutableSettings.Builder randomSettingsBuilder() {
|
/**
|
||||||
// TODO RANDOMIZE
|
* Returns a settings object used in {@link #createIndex(String...)} and {@link #prepareCreate(String)} and friends.
|
||||||
return ImmutableSettings.builder();
|
* This method can be overwritten by subclasses to set defaults for the indices that are created by the test.
|
||||||
|
* By default it returns an empty settings object.
|
||||||
|
*/
|
||||||
|
public Settings indexSettings() {
|
||||||
|
return ImmutableSettings.EMPTY;
|
||||||
}
|
}
|
||||||
// TODO Randomize MergePolicyProviderBase.INDEX_COMPOUND_FORMAT [true|false|"true"|"false"|[0..1]| toString([0..1])]
|
/**
|
||||||
|
* Deletes the given indices from the tests cluster. If no index name is passed to this method
|
||||||
public Settings getSettings() {
|
* all indices are removed.
|
||||||
return randomSettingsBuilder().build();
|
*/
|
||||||
}
|
|
||||||
|
|
||||||
public static void wipeIndices(String... names) {
|
public static void wipeIndices(String... names) {
|
||||||
if (cluster().numNodes() > 0) {
|
if (cluster().size() > 0) {
|
||||||
try {
|
try {
|
||||||
assertAcked(client().admin().indices().prepareDelete(names));
|
assertAcked(client().admin().indices().prepareDelete(names));
|
||||||
} catch (IndexMissingException e) {
|
} catch (IndexMissingException e) {
|
||||||
|
@ -259,15 +317,12 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public static void wipeIndex(String name) {
|
|
||||||
wipeIndices(name);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Deletes index templates, support wildcard notation.
|
* Deletes index templates, support wildcard notation.
|
||||||
|
* If no template name is passed to this method all templates are removed.
|
||||||
*/
|
*/
|
||||||
public static void wipeTemplates(String... templates) {
|
public static void wipeTemplates(String... templates) {
|
||||||
if (cluster().numNodes() > 0) {
|
if (cluster().size() > 0) {
|
||||||
// if nothing is provided, delete all
|
// if nothing is provided, delete all
|
||||||
if (templates.length == 0) {
|
if (templates.length == 0) {
|
||||||
templates = new String[]{"*"};
|
templates = new String[]{"*"};
|
||||||
|
@ -282,25 +337,59 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public void createIndex(String... names) {
|
/**
|
||||||
|
* Creates one or more indices and asserts that the indices are acknowledged. If one of the indices
|
||||||
|
* already exists this method will fail and wipe all the indices created so far.
|
||||||
|
*/
|
||||||
|
public final void createIndex(String... names) {
|
||||||
|
|
||||||
|
List<String> created = new ArrayList<String>();
|
||||||
for (String name : names) {
|
for (String name : names) {
|
||||||
|
boolean success = false;
|
||||||
try {
|
try {
|
||||||
assertAcked(prepareCreate(name).setSettings(getSettings()));
|
assertAcked(prepareCreate(name));
|
||||||
continue;
|
created.add(name);
|
||||||
} catch (IndexAlreadyExistsException ex) {
|
success = true;
|
||||||
wipeIndex(name);
|
} finally {
|
||||||
|
if (!success) {
|
||||||
|
wipeIndices(created.toArray(new String[0]));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
assertAcked(prepareCreate(name).setSettings(getSettings()));
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public CreateIndexRequestBuilder prepareCreate(String index, int numNodes) {
|
/**
|
||||||
|
* Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
|
||||||
|
*/
|
||||||
|
public final CreateIndexRequestBuilder prepareCreate(String index) {
|
||||||
|
return client().admin().indices().prepareCreate(index).setSettings(indexSettings());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
|
||||||
|
* The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this
|
||||||
|
* method.
|
||||||
|
* <p>
|
||||||
|
* This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation
|
||||||
|
* rules based on <code>index.routing.allocation.exclude._name</code>.
|
||||||
|
* </p>
|
||||||
|
*/
|
||||||
|
public final CreateIndexRequestBuilder prepareCreate(String index, int numNodes) {
|
||||||
return prepareCreate(index, numNodes, ImmutableSettings.builder());
|
return prepareCreate(index, numNodes, ImmutableSettings.builder());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
|
||||||
|
* The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this
|
||||||
|
* method.
|
||||||
|
* <p>
|
||||||
|
* This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation
|
||||||
|
* rules based on <code>index.routing.allocation.exclude._name</code>.
|
||||||
|
* </p>
|
||||||
|
*/
|
||||||
public CreateIndexRequestBuilder prepareCreate(String index, int numNodes, ImmutableSettings.Builder builder) {
|
public CreateIndexRequestBuilder prepareCreate(String index, int numNodes, ImmutableSettings.Builder builder) {
|
||||||
cluster().ensureAtLeastNumNodes(numNodes);
|
cluster().ensureAtLeastNumNodes(numNodes);
|
||||||
Settings settings = getSettings();
|
Settings settings = indexSettings();
|
||||||
builder.put(settings);
|
builder.put(settings);
|
||||||
if (numNodes > 0) {
|
if (numNodes > 0) {
|
||||||
getExcludeSettings(index, numNodes, builder);
|
getExcludeSettings(index, numNodes, builder);
|
||||||
|
@ -314,30 +403,17 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return builder;
|
return builder;
|
||||||
}
|
}
|
||||||
|
|
||||||
public Set<String> getExcludeNodes(String index, int num) {
|
/**
|
||||||
Set<String> nodeExclude = cluster().nodeExclude(index);
|
* Restricts the given index to be allocated on <code>n</code> nodes using the allocation deciders.
|
||||||
Set<String> nodesInclude = cluster().nodesInclude(index);
|
* Yet if the shards can't be allocated on any other node shards for this index will remain allocated on
|
||||||
if (nodesInclude.size() < num) {
|
* more than <code>n</code> nodes.
|
||||||
Iterator<String> limit = Iterators.limit(nodeExclude.iterator(), num - nodesInclude.size());
|
*/
|
||||||
while (limit.hasNext()) {
|
public void allowNodes(String index, int n) {
|
||||||
limit.next();
|
assert index != null;
|
||||||
limit.remove();
|
cluster().ensureAtLeastNumNodes(n);
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Iterator<String> limit = Iterators.limit(nodesInclude.iterator(), nodesInclude.size() - num);
|
|
||||||
while (limit.hasNext()) {
|
|
||||||
nodeExclude.add(limit.next());
|
|
||||||
limit.remove();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nodeExclude;
|
|
||||||
}
|
|
||||||
|
|
||||||
public void allowNodes(String index, int numNodes) {
|
|
||||||
cluster().ensureAtLeastNumNodes(numNodes);
|
|
||||||
ImmutableSettings.Builder builder = ImmutableSettings.builder();
|
ImmutableSettings.Builder builder = ImmutableSettings.builder();
|
||||||
if (numNodes > 0) {
|
if (n > 0) {
|
||||||
getExcludeSettings(index, numNodes, builder);
|
getExcludeSettings(index, n, builder);
|
||||||
}
|
}
|
||||||
Settings build = builder.build();
|
Settings build = builder.build();
|
||||||
if (!build.getAsMap().isEmpty()) {
|
if (!build.getAsMap().isEmpty()) {
|
||||||
|
@ -345,10 +421,11 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public CreateIndexRequestBuilder prepareCreate(String index) {
|
/**
|
||||||
return client().admin().indices().prepareCreate(index).setSettings(getSettings());
|
* Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations.
|
||||||
}
|
* It is useful to ensure that all action on the cluster have finished and all shards that were currently relocating
|
||||||
|
* are now allocated and started.
|
||||||
|
*/
|
||||||
public ClusterHealthStatus ensureGreen() {
|
public ClusterHealthStatus ensureGreen() {
|
||||||
ClusterHealthResponse actionGet = client().admin().cluster()
|
ClusterHealthResponse actionGet = client().admin().cluster()
|
||||||
.health(Requests.clusterHealthRequest().waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
|
.health(Requests.clusterHealthRequest().waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
|
||||||
|
@ -360,10 +437,17 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return actionGet.getStatus();
|
return actionGet.getStatus();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Waits for all relocating shards to become active using the cluster health API.
|
||||||
|
*/
|
||||||
public ClusterHealthStatus waitForRelocation() {
|
public ClusterHealthStatus waitForRelocation() {
|
||||||
return waitForRelocation(null);
|
return waitForRelocation(null);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Waits for all relocating shards to become active and the cluster has reached the given health status
|
||||||
|
* using the cluster health API.
|
||||||
|
*/
|
||||||
public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) {
|
public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) {
|
||||||
ClusterHealthRequest request = Requests.clusterHealthRequest().waitForRelocatingShards(0);
|
ClusterHealthRequest request = Requests.clusterHealthRequest().waitForRelocatingShards(0);
|
||||||
if (status != null) {
|
if (status != null) {
|
||||||
|
@ -381,6 +465,9 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return actionGet.getStatus();
|
return actionGet.getStatus();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensures the cluster has a yellow state via the cluster health API.
|
||||||
|
*/
|
||||||
public ClusterHealthStatus ensureYellow() {
|
public ClusterHealthStatus ensureYellow() {
|
||||||
ClusterHealthResponse actionGet = client().admin().cluster()
|
ClusterHealthResponse actionGet = client().admin().cluster()
|
||||||
.health(Requests.clusterHealthRequest().waitForRelocatingShards(0).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet();
|
.health(Requests.clusterHealthRequest().waitForRelocatingShards(0).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet();
|
||||||
|
@ -391,33 +478,61 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return actionGet.getStatus();
|
return actionGet.getStatus();
|
||||||
}
|
}
|
||||||
|
|
||||||
public static String commaString(Iterable<String> strings) {
|
/**
|
||||||
return Joiner.on(',').join(strings);
|
* Syntactic sugar for:
|
||||||
}
|
* <pre>
|
||||||
|
* client().prepareIndex(index, type).setSource(source).execute().actionGet();
|
||||||
// utils
|
* </pre>
|
||||||
protected IndexResponse index(String index, String type, XContentBuilder source) {
|
*/
|
||||||
|
protected final IndexResponse index(String index, String type, XContentBuilder source) {
|
||||||
return client().prepareIndex(index, type).setSource(source).execute().actionGet();
|
return client().prepareIndex(index, type).setSource(source).execute().actionGet();
|
||||||
}
|
}
|
||||||
|
|
||||||
protected IndexResponse index(String index, String type, String id, Map<String, Object> source) {
|
/**
|
||||||
|
* Syntactic sugar for:
|
||||||
|
* <pre>
|
||||||
|
* client().prepareIndex(index, type).setSource(source).execute().actionGet();
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
protected final IndexResponse index(String index, String type, String id, Map<String, Object> source) {
|
||||||
return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
|
return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
protected GetResponse get(String index, String type, String id) {
|
* Syntactic sugar for:
|
||||||
|
* <pre>
|
||||||
|
* client().prepareGet(index, type, id).execute().actionGet();
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
protected final GetResponse get(String index, String type, String id) {
|
||||||
return client().prepareGet(index, type, id).execute().actionGet();
|
return client().prepareGet(index, type, id).execute().actionGet();
|
||||||
}
|
}
|
||||||
|
|
||||||
protected IndexResponse index(String index, String type, String id, XContentBuilder source) {
|
/**
|
||||||
|
* Syntactic sugar for:
|
||||||
|
* <pre>
|
||||||
|
* return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
protected final IndexResponse index(String index, String type, String id, XContentBuilder source) {
|
||||||
return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
|
return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
|
||||||
}
|
}
|
||||||
|
|
||||||
protected IndexResponse index(String index, String type, String id, Object... source) {
|
/**
|
||||||
|
* Syntactic sugar for:
|
||||||
|
* <pre>
|
||||||
|
* return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
|
||||||
|
* </pre>
|
||||||
|
*/
|
||||||
|
protected final IndexResponse index(String index, String type, String id, Object... source) {
|
||||||
return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
|
return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
|
||||||
}
|
}
|
||||||
|
|
||||||
protected RefreshResponse refresh() {
|
/**
|
||||||
|
* Waits for relocations and refreshes all indices in the cluster.
|
||||||
|
* @see #waitForRelocation()
|
||||||
|
*/
|
||||||
|
protected final RefreshResponse refresh() {
|
||||||
waitForRelocation();
|
waitForRelocation();
|
||||||
// TODO RANDOMIZE with flush?
|
// TODO RANDOMIZE with flush?
|
||||||
RefreshResponse actionGet = client().admin().indices().prepareRefresh().execute().actionGet();
|
RefreshResponse actionGet = client().admin().indices().prepareRefresh().execute().actionGet();
|
||||||
|
@ -425,16 +540,22 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return actionGet;
|
return actionGet;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected void flushAndRefresh() {
|
/**
|
||||||
|
* Flushes and refreshes all indices in the cluster
|
||||||
|
*/
|
||||||
|
protected final void flushAndRefresh() {
|
||||||
flush(true);
|
flush(true);
|
||||||
refresh();
|
refresh();
|
||||||
}
|
}
|
||||||
|
|
||||||
protected FlushResponse flush() {
|
/**
|
||||||
|
* Flushes all indices in the cluster
|
||||||
|
*/
|
||||||
|
protected final FlushResponse flush() {
|
||||||
return flush(true);
|
return flush(true);
|
||||||
}
|
}
|
||||||
|
|
||||||
protected FlushResponse flush(boolean ignoreNotAllowed) {
|
private FlushResponse flush(boolean ignoreNotAllowed) {
|
||||||
waitForRelocation();
|
waitForRelocation();
|
||||||
FlushResponse actionGet = client().admin().indices().prepareFlush().execute().actionGet();
|
FlushResponse actionGet = client().admin().indices().prepareFlush().execute().actionGet();
|
||||||
if (ignoreNotAllowed) {
|
if (ignoreNotAllowed) {
|
||||||
|
@ -447,6 +568,9 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return actionGet;
|
return actionGet;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Waits for all relocations and optimized all indices in the cluster to 1 segment.
|
||||||
|
*/
|
||||||
protected OptimizeResponse optimize() {
|
protected OptimizeResponse optimize() {
|
||||||
waitForRelocation();
|
waitForRelocation();
|
||||||
OptimizeResponse actionGet = client().admin().indices().prepareOptimize().execute().actionGet();
|
OptimizeResponse actionGet = client().admin().indices().prepareOptimize().execute().actionGet();
|
||||||
|
@ -454,48 +578,29 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return actionGet;
|
return actionGet;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected Set<String> nodeIdsWithIndex(String... indices) {
|
/**
|
||||||
ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
|
* Returns <code>true</code> iff the given index exists otherwise <code>false</code>
|
||||||
GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
|
*/
|
||||||
Set<String> nodes = new HashSet<String>();
|
|
||||||
for (ShardIterator shardIterator : allAssignedShardsGrouped) {
|
|
||||||
for (ShardRouting routing : shardIterator.asUnordered()) {
|
|
||||||
if (routing.active()) {
|
|
||||||
nodes.add(routing.currentNodeId());
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nodes;
|
|
||||||
}
|
|
||||||
|
|
||||||
protected int numAssignedShards(String... indices) {
|
|
||||||
ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
|
|
||||||
GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
|
|
||||||
return allAssignedShardsGrouped.size();
|
|
||||||
}
|
|
||||||
|
|
||||||
protected boolean indexExists(String index) {
|
protected boolean indexExists(String index) {
|
||||||
IndicesExistsResponse actionGet = client().admin().indices().prepareExists(index).execute().actionGet();
|
IndicesExistsResponse actionGet = client().admin().indices().prepareExists(index).execute().actionGet();
|
||||||
return actionGet.isExists();
|
return actionGet.isExists();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a random admin client. This client can either be a node or a transport client pointing to any of
|
||||||
|
* the nodes in the cluster.
|
||||||
|
*/
|
||||||
protected AdminClient admin() {
|
protected AdminClient admin() {
|
||||||
return client().admin();
|
return client().admin();
|
||||||
}
|
}
|
||||||
|
|
||||||
protected <Res extends ActionResponse> Res run(ActionRequestBuilder<?, Res, ?> builder) {
|
/**
|
||||||
Res actionGet = builder.execute().actionGet();
|
* Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
|
||||||
return actionGet;
|
* indexes they in a blocking or async fashion. This is very useful to catch problems that relate to internal document
|
||||||
}
|
* ids or index segment creations. Some features might have bug when a given document is the first or the last in a
|
||||||
|
* segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index
|
||||||
protected <Res extends BroadcastOperationResponse> Res run(BroadcastOperationRequestBuilder<?, Res, ?> builder) {
|
* layout.
|
||||||
Res actionGet = builder.execute().actionGet();
|
*/
|
||||||
assertNoFailures(actionGet);
|
|
||||||
return actionGet;
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO move this into a base class for integration tests
|
|
||||||
public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) throws InterruptedException, ExecutionException {
|
public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) throws InterruptedException, ExecutionException {
|
||||||
if (builders.length == 0) {
|
if (builders.length == 0) {
|
||||||
return;
|
return;
|
||||||
|
@ -574,8 +679,6 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return l;
|
return l;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
private class LatchedActionListener<Response> implements ActionListener<Response> {
|
private class LatchedActionListener<Response> implements ActionListener<Response> {
|
||||||
private final CountDownLatch latch;
|
private final CountDownLatch latch;
|
||||||
|
|
||||||
|
@ -619,14 +722,34 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Clears the given scroll Ids
|
||||||
|
*/
|
||||||
public void clearScroll(String... scrollIds) {
|
public void clearScroll(String... scrollIds) {
|
||||||
ClearScrollResponse clearResponse = client().prepareClearScroll()
|
ClearScrollResponse clearResponse = client().prepareClearScroll()
|
||||||
.setScrollIds(Arrays.asList(scrollIds)).get();
|
.setScrollIds(Arrays.asList(scrollIds)).get();
|
||||||
assertThat(clearResponse.isSucceeded(), equalTo(true));
|
assertThat(clearResponse.isSucceeded(), equalTo(true));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The scope of a test cluster used together with
|
||||||
|
* {@link ClusterScope} annonations on {@link ElasticsearchIntegrationTest} subclasses.
|
||||||
|
*/
|
||||||
public static enum Scope {
|
public static enum Scope {
|
||||||
GLOBAL, SUITE, TEST;
|
/**
|
||||||
|
* A globally shared cluster. This cluster doesn't allow modification of transient or persistent
|
||||||
|
* cluster settings.
|
||||||
|
*/
|
||||||
|
GLOBAL,
|
||||||
|
/**
|
||||||
|
* A cluster shared across all method in a single test suite
|
||||||
|
*/
|
||||||
|
SUITE,
|
||||||
|
/**
|
||||||
|
* A test exclusive test cluster
|
||||||
|
*/
|
||||||
|
TEST;
|
||||||
}
|
}
|
||||||
|
|
||||||
private ClusterScope getAnnotation(Class<?> clazz) {
|
private ClusterScope getAnnotation(Class<?> clazz) {
|
||||||
|
@ -651,12 +774,18 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return annotation == null ? -1 : annotation.numNodes();
|
return annotation == null ? -1 : annotation.numNodes();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This method is used to obtain settings for the <tt>Nth</tt> node in the cluster.
|
||||||
|
* Nodes in this cluster are associated with an ordinal number such that nodes can
|
||||||
|
* be started with specific configurations. This method might be called multiple
|
||||||
|
* times with the same ordinal and is expected to return the same value for each invocation.
|
||||||
|
* In other words subclasses must ensure this method is idempotent.
|
||||||
|
*/
|
||||||
protected Settings nodeSettings(int nodeOrdinal) {
|
protected Settings nodeSettings(int nodeOrdinal) {
|
||||||
return ImmutableSettings.EMPTY;
|
return ImmutableSettings.EMPTY;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected TestCluster buildTestCluster(Scope scope) {
|
private TestCluster buildTestCluster(Scope scope) {
|
||||||
long currentClusterSeed = randomLong();
|
long currentClusterSeed = randomLong();
|
||||||
int numNodes = getNumNodes();
|
int numNodes = getNumNodes();
|
||||||
NodeSettingsSource nodeSettingsSource;
|
NodeSettingsSource nodeSettingsSource;
|
||||||
|
@ -678,30 +807,56 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||||
return new TestCluster(currentClusterSeed, numNodes, TestCluster.clusterName(scope.name(), ElasticsearchTestCase.CHILD_VM_ID, currentClusterSeed), nodeSettingsSource);
|
return new TestCluster(currentClusterSeed, numNodes, TestCluster.clusterName(scope.name(), ElasticsearchTestCase.CHILD_VM_ID, currentClusterSeed), nodeSettingsSource);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Defines a cluster scope for a {@link ElasticsearchIntegrationTest} subclass.
|
||||||
|
* By default if no {@link ClusterScope} annotation is present {@link Scope#GLOBAL} is used
|
||||||
|
* together with randomly chosen settings like number of nodes etc.
|
||||||
|
*/
|
||||||
@Retention(RetentionPolicy.RUNTIME)
|
@Retention(RetentionPolicy.RUNTIME)
|
||||||
@Target({ElementType.TYPE})
|
@Target({ElementType.TYPE})
|
||||||
public @interface ClusterScope {
|
public @interface ClusterScope {
|
||||||
|
/**
|
||||||
|
* Returns the scope. {@link Scope#GLOBAL} is default.
|
||||||
|
*/
|
||||||
Scope scope() default Scope.GLOBAL;
|
Scope scope() default Scope.GLOBAL;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the number of nodes in the cluster. Default is <tt>-1</tt> which means
|
||||||
|
* a random number of nodes but at least <code>2</code></tt> is used./
|
||||||
|
*/
|
||||||
int numNodes() default -1;
|
int numNodes() default -1;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the transport client ratio. By default this returns <code>-1</code> which means a random
|
||||||
|
* ratio in the interval <code>[0..1]</code> is used.
|
||||||
|
*/
|
||||||
double transportClientRatio() default -1;
|
double transportClientRatio() default -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
private static long clusterSeed() {
|
private static long clusterSeed() {
|
||||||
String property = System.getProperty("tests.cluster_seed");
|
String property = System.getProperty(TESTS_CLUSTER_SEED);
|
||||||
if (property == null || property.isEmpty()) {
|
if (property == null || property.isEmpty()) {
|
||||||
return System.nanoTime();
|
return System.nanoTime();
|
||||||
}
|
}
|
||||||
return SeedUtils.parseSeed(property);
|
return SeedUtils.parseSeed(property);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the client ratio configured via
|
||||||
|
*/
|
||||||
private static double transportClientRatio() {
|
private static double transportClientRatio() {
|
||||||
String property = System.getProperty("tests.client.ratio");
|
String property = System.getProperty(TESTS_CLIENT_RATIO);
|
||||||
if (property == null || property.isEmpty()) {
|
if (property == null || property.isEmpty()) {
|
||||||
return Double.NaN;
|
return Double.NaN;
|
||||||
}
|
}
|
||||||
return Double.parseDouble(property);
|
return Double.parseDouble(property);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the transport client ratio from the class level annotation or via
|
||||||
|
* {@link System#getProperty(String)} if available. If both are not available this will
|
||||||
|
* return a random ratio in the interval <tt>[0..1]</tt>
|
||||||
|
*/
|
||||||
private double getPerTestTransportClientRatio() {
|
private double getPerTestTransportClientRatio() {
|
||||||
final ClusterScope annotation = getAnnotation(this.getClass());
|
final ClusterScope annotation = getAnnotation(this.getClass());
|
||||||
double perTestRatio = -1;
|
double perTestRatio = -1;
|
||||||
|
|
|
@ -27,6 +27,10 @@ import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
|
||||||
import org.apache.lucene.util.TimeUnits;
|
import org.apache.lucene.util.TimeUnits;
|
||||||
import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
|
import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Base testcase for lucene based testing. This class should be used if low level lucene features are tested.
|
||||||
|
*/
|
||||||
@Listeners({
|
@Listeners({
|
||||||
ReproduceInfoPrinter.class
|
ReproduceInfoPrinter.class
|
||||||
})
|
})
|
||||||
|
|
|
@ -44,6 +44,9 @@ import java.util.*;
|
||||||
import java.util.Map.Entry;
|
import java.util.Map.Entry;
|
||||||
import java.util.concurrent.TimeUnit;
|
import java.util.concurrent.TimeUnit;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Base testcase for randomized unit testing with Elasticsearch
|
||||||
|
*/
|
||||||
@ThreadLeakFilters(defaultFilters = true, filters = {ElasticsearchThreadFilter.class})
|
@ThreadLeakFilters(defaultFilters = true, filters = {ElasticsearchThreadFilter.class})
|
||||||
@ThreadLeakScope(Scope.NONE)
|
@ThreadLeakScope(Scope.NONE)
|
||||||
@TimeoutSuite(millis = TimeUnits.HOUR) // timeout the suite after 1h and fail the test.
|
@TimeoutSuite(millis = TimeUnits.HOUR) // timeout the suite after 1h and fail the test.
|
||||||
|
|
|
@ -21,7 +21,10 @@ package org.elasticsearch.test;
|
||||||
|
|
||||||
import com.carrotsearch.randomizedtesting.ThreadFilter;
|
import com.carrotsearch.randomizedtesting.ThreadFilter;
|
||||||
|
|
||||||
public class ElasticsearchThreadFilter implements ThreadFilter {
|
/**
|
||||||
|
* Simple thread filter for randomized runner
|
||||||
|
*/
|
||||||
|
public final class ElasticsearchThreadFilter implements ThreadFilter {
|
||||||
@Override
|
@Override
|
||||||
public boolean reject(Thread t) {
|
public boolean reject(Thread t) {
|
||||||
return true;
|
return true;
|
||||||
|
|
|
@ -31,6 +31,11 @@ import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
|
||||||
@ThreadLeakFilters(defaultFilters = true, filters = {ElasticsearchThreadFilter.class})
|
@ThreadLeakFilters(defaultFilters = true, filters = {ElasticsearchThreadFilter.class})
|
||||||
@ThreadLeakScope(Scope.NONE)
|
@ThreadLeakScope(Scope.NONE)
|
||||||
@TimeoutSuite(millis = TimeUnits.HOUR)
|
@TimeoutSuite(millis = TimeUnits.HOUR)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Basic test case for token streams. the assertion methods in this class will
|
||||||
|
* run basic checks to enforce correct behavior of the token streams.
|
||||||
|
*/
|
||||||
public abstract class ElasticsearchTokenStreamTestCase extends BaseTokenStreamTestCase {
|
public abstract class ElasticsearchTokenStreamTestCase extends BaseTokenStreamTestCase {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -24,7 +24,7 @@ import org.elasticsearch.common.settings.Settings;
|
||||||
|
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
|
||||||
public abstract class NodeSettingsSource {
|
abstract class NodeSettingsSource {
|
||||||
|
|
||||||
public static final NodeSettingsSource EMPTY = new NodeSettingsSource() {
|
public static final NodeSettingsSource EMPTY = new NodeSettingsSource() {
|
||||||
@Override
|
@Override
|
||||||
|
|
|
@ -19,11 +19,9 @@
|
||||||
package org.elasticsearch.test;
|
package org.elasticsearch.test;
|
||||||
|
|
||||||
import com.carrotsearch.randomizedtesting.SeedUtils;
|
import com.carrotsearch.randomizedtesting.SeedUtils;
|
||||||
import com.google.common.base.Function;
|
|
||||||
import com.google.common.base.Predicate;
|
import com.google.common.base.Predicate;
|
||||||
import com.google.common.base.Predicates;
|
import com.google.common.base.Predicates;
|
||||||
import com.google.common.collect.Collections2;
|
import com.google.common.collect.Collections2;
|
||||||
import com.google.common.collect.ImmutableSet;
|
|
||||||
import com.google.common.collect.Iterators;
|
import com.google.common.collect.Iterators;
|
||||||
import com.google.common.collect.Sets;
|
import com.google.common.collect.Sets;
|
||||||
import org.apache.lucene.util.IOUtils;
|
import org.apache.lucene.util.IOUtils;
|
||||||
|
@ -31,8 +29,6 @@ import org.elasticsearch.client.Client;
|
||||||
import org.elasticsearch.client.transport.TransportClient;
|
import org.elasticsearch.client.transport.TransportClient;
|
||||||
import org.elasticsearch.cluster.ClusterService;
|
import org.elasticsearch.cluster.ClusterService;
|
||||||
import org.elasticsearch.cluster.ClusterState;
|
import org.elasticsearch.cluster.ClusterState;
|
||||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
|
||||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
|
||||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||||
|
@ -44,9 +40,7 @@ import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
import org.elasticsearch.common.settings.ImmutableSettings.Builder;
|
import org.elasticsearch.common.settings.ImmutableSettings.Builder;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.transport.TransportAddress;
|
import org.elasticsearch.common.transport.TransportAddress;
|
||||||
import org.elasticsearch.common.unit.TimeValue;
|
|
||||||
import org.elasticsearch.env.NodeEnvironment;
|
import org.elasticsearch.env.NodeEnvironment;
|
||||||
import org.elasticsearch.gateway.Gateway;
|
|
||||||
import org.elasticsearch.index.engine.IndexEngineModule;
|
import org.elasticsearch.index.engine.IndexEngineModule;
|
||||||
import org.elasticsearch.node.Node;
|
import org.elasticsearch.node.Node;
|
||||||
import org.elasticsearch.node.internal.InternalNode;
|
import org.elasticsearch.node.internal.InternalNode;
|
||||||
|
@ -67,9 +61,20 @@ import static com.google.common.collect.Maps.newTreeMap;
|
||||||
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
|
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
|
||||||
import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
|
import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
|
||||||
|
|
||||||
public class TestCluster implements Closeable, Iterable<Client> {
|
/**
|
||||||
|
* TestCluster manages a set of JVM private nodes and allows convenient access to them.
|
||||||
|
* The cluster supports randomized configuration such that nodes started in the cluster will
|
||||||
|
* automatically load asserting services tracking resources like file handles or open searchers.
|
||||||
|
* <p>
|
||||||
|
* The Cluster is bound to a test lifecycle where tests must call {@link #beforeTest(java.util.Random, double)} and
|
||||||
|
* {@link #afterTest()} to initialize and reset the cluster in order to be more reproducible. The term "more" relates
|
||||||
|
* to the async nature of Elasticsearch in combination with randomized testing. Once Threads and asynchronous calls
|
||||||
|
* are involved reproducibility is very limited. This class should only be used through {@link ElasticsearchIntegrationTest}.
|
||||||
|
* </p>
|
||||||
|
*/
|
||||||
|
public final class TestCluster implements Iterable<Client> {
|
||||||
|
|
||||||
protected final ESLogger logger = Loggers.getLogger(getClass());
|
private final ESLogger logger = Loggers.getLogger(getClass());
|
||||||
|
|
||||||
/* sorted map to make traverse order reproducible */
|
/* sorted map to make traverse order reproducible */
|
||||||
private final TreeMap<String, NodeAndClient> nodes = newTreeMap();
|
private final TreeMap<String, NodeAndClient> nodes = newTreeMap();
|
||||||
|
@ -98,11 +103,11 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
|
|
||||||
private final NodeSettingsSource nodeSettingsSource;
|
private final NodeSettingsSource nodeSettingsSource;
|
||||||
|
|
||||||
public TestCluster(long clusterSeed, String clusterName) {
|
TestCluster(long clusterSeed, String clusterName) {
|
||||||
this(clusterSeed, -1, clusterName, NodeSettingsSource.EMPTY);
|
this(clusterSeed, -1, clusterName, NodeSettingsSource.EMPTY);
|
||||||
}
|
}
|
||||||
|
|
||||||
public TestCluster(long clusterSeed, int numNodes, String clusterName, NodeSettingsSource nodeSettingsSource) {
|
TestCluster(long clusterSeed, int numNodes, String clusterName, NodeSettingsSource nodeSettingsSource) {
|
||||||
this.clusterName = clusterName;
|
this.clusterName = clusterName;
|
||||||
Random random = new Random(clusterSeed);
|
Random random = new Random(clusterSeed);
|
||||||
numSharedNodes = numNodes == -1 ? 2 + random.nextInt(4) : numNodes; // at least 2 nodes if randomized
|
numSharedNodes = numNodes == -1 ? 2 + random.nextInt(4) : numNodes; // at least 2 nodes if randomized
|
||||||
|
@ -155,7 +160,7 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return builder.build();
|
return builder.build();
|
||||||
}
|
}
|
||||||
|
|
||||||
public static String clusterName(String prefix, String childVMId, long clusterSeed) {
|
static String clusterName(String prefix, String childVMId, long clusterSeed) {
|
||||||
StringBuilder builder = new StringBuilder(prefix);
|
StringBuilder builder = new StringBuilder(prefix);
|
||||||
builder.append('-').append(NetworkUtils.getLocalAddress().getHostName());
|
builder.append('-').append(NetworkUtils.getLocalAddress().getHostName());
|
||||||
builder.append("-CHILD_VM=[").append(childVMId).append(']');
|
builder.append("-CHILD_VM=[").append(childVMId).append(']');
|
||||||
|
@ -203,24 +208,34 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized void ensureAtLeastNumNodes(int num) {
|
/**
|
||||||
|
* Ensures that at least <code>n</code> nodes are present in the cluster.
|
||||||
|
* if more nodes than <code>n</code> are present this method will not
|
||||||
|
* stop any of the running nodes.
|
||||||
|
*/
|
||||||
|
public synchronized void ensureAtLeastNumNodes(int n) {
|
||||||
int size = nodes.size();
|
int size = nodes.size();
|
||||||
for (int i = size; i < num; i++) {
|
for (int i = size; i < n; i++) {
|
||||||
logger.info("increasing cluster size from {} to {}", size, num);
|
logger.info("increasing cluster size from {} to {}", size, n);
|
||||||
NodeAndClient buildNode = buildNode();
|
NodeAndClient buildNode = buildNode();
|
||||||
buildNode.node().start();
|
buildNode.node().start();
|
||||||
publishNode(buildNode);
|
publishNode(buildNode);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized void ensureAtMostNumNodes(int num) {
|
/**
|
||||||
if (nodes.size() <= num) {
|
* Ensures that at most <code>n</code> are up and running.
|
||||||
|
* If less nodes that <code>n</code> are running this method
|
||||||
|
* will not start any additional nodes.
|
||||||
|
*/
|
||||||
|
public synchronized void ensureAtMostNumNodes(int n) {
|
||||||
|
if (nodes.size() <= n) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
// prevent killing the master if possible
|
// prevent killing the master if possible
|
||||||
final Iterator<NodeAndClient> values = num == 0 ? nodes.values().iterator() : Iterators.filter(nodes.values().iterator(), Predicates.not(new MasterNodePredicate(getMasterName())));
|
final Iterator<NodeAndClient> values = n == 0 ? nodes.values().iterator() : Iterators.filter(nodes.values().iterator(), Predicates.not(new MasterNodePredicate(getMasterName())));
|
||||||
final Iterator<NodeAndClient> limit = Iterators.limit(values, nodes.size() - num);
|
final Iterator<NodeAndClient> limit = Iterators.limit(values, nodes.size() - n);
|
||||||
logger.info("reducing cluster size from {} to {}", nodes.size() - num, num);
|
logger.info("reducing cluster size from {} to {}", nodes.size() - n, n);
|
||||||
Set<NodeAndClient> nodesToRemove = new HashSet<NodeAndClient>();
|
Set<NodeAndClient> nodesToRemove = new HashSet<NodeAndClient>();
|
||||||
while (limit.hasNext()) {
|
while (limit.hasNext()) {
|
||||||
NodeAndClient next = limit.next();
|
NodeAndClient next = limit.next();
|
||||||
|
@ -260,12 +275,16 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return "node_" + id;
|
return "node_" + id;
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized Client client() {
|
synchronized Client client() {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
/* Randomly return a client to one of the nodes in the cluster */
|
/* Randomly return a client to one of the nodes in the cluster */
|
||||||
return getOrBuildRandomNode().client(random);
|
return getOrBuildRandomNode().client(random);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a node client to the current master node.
|
||||||
|
* Note: use this with care tests should not rely on a certain nodes client.
|
||||||
|
*/
|
||||||
public synchronized Client masterClient() {
|
public synchronized Client masterClient() {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new MasterNodePredicate(getMasterName()));
|
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new MasterNodePredicate(getMasterName()));
|
||||||
|
@ -276,6 +295,9 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return null; // can't happen
|
return null; // can't happen
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a node client to random node but not the master. This method will fail if no non-master client is available.
|
||||||
|
*/
|
||||||
public synchronized Client nonMasterClient() {
|
public synchronized Client nonMasterClient() {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
|
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
|
||||||
|
@ -286,6 +308,9 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return null; // can't happen
|
return null; // can't happen
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a client to a node started with "node.client: true"
|
||||||
|
*/
|
||||||
public synchronized Client clientNodeClient() {
|
public synchronized Client clientNodeClient() {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new ClientNodePredicate());
|
NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new ClientNodePredicate());
|
||||||
|
@ -296,15 +321,23 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return getRandomNodeAndClient(new ClientNodePredicate()).client(random);
|
return getRandomNodeAndClient(new ClientNodePredicate()).client(random);
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized Client clientNodeClient(String nodeName) {
|
/**
|
||||||
|
* Returns a node client to a given node.
|
||||||
|
*/
|
||||||
|
public synchronized Client client(String nodeName) {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
NodeAndClient randomNodeAndClient = nodes.get(nodeName);
|
NodeAndClient nodeAndClient = nodes.get(nodeName);
|
||||||
if (randomNodeAndClient != null) {
|
if (nodeAndClient != null) {
|
||||||
return randomNodeAndClient.client(random);
|
return nodeAndClient.client(random);
|
||||||
}
|
}
|
||||||
return null;
|
Assert.fail("No node found with name: [" + nodeName + "]");
|
||||||
|
return null; // can't happen
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a "smart" node client to a random node in the cluster
|
||||||
|
*/
|
||||||
public synchronized Client smartClient() {
|
public synchronized Client smartClient() {
|
||||||
NodeAndClient randomNodeAndClient = getRandomNodeAndClient();
|
NodeAndClient randomNodeAndClient = getRandomNodeAndClient();
|
||||||
if (randomNodeAndClient != null) {
|
if (randomNodeAndClient != null) {
|
||||||
|
@ -314,6 +347,11 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return null; // can't happen
|
return null; // can't happen
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a random node that applies to the given predicate.
|
||||||
|
* The predicate can filter nodes based on the nodes settings.
|
||||||
|
* If all nodes are filtered out this method will return <code>null</code>
|
||||||
|
*/
|
||||||
public synchronized Client client(final Predicate<Settings> filterPredicate) {
|
public synchronized Client client(final Predicate<Settings> filterPredicate) {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
final NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new Predicate<NodeAndClient>() {
|
final NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new Predicate<NodeAndClient>() {
|
||||||
|
@ -328,7 +366,7 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void close() {
|
void close() {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
if (this.open.compareAndSet(true, false)) {
|
if (this.open.compareAndSet(true, false)) {
|
||||||
IOUtils.closeWhileHandlingException(nodes.values());
|
IOUtils.closeWhileHandlingException(nodes.values());
|
||||||
|
@ -336,17 +374,6 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized ImmutableSet<ClusterBlock> waitForNoBlocks(TimeValue timeout, Node node) throws InterruptedException {
|
|
||||||
ensureOpen();
|
|
||||||
long start = System.currentTimeMillis();
|
|
||||||
ImmutableSet<ClusterBlock> blocks;
|
|
||||||
do {
|
|
||||||
blocks = node.client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState().blocks()
|
|
||||||
.global(ClusterBlockLevel.METADATA);
|
|
||||||
} while (!blocks.isEmpty() && (System.currentTimeMillis() - start) < timeout.millis());
|
|
||||||
return blocks;
|
|
||||||
}
|
|
||||||
|
|
||||||
private final class NodeAndClient implements Closeable {
|
private final class NodeAndClient implements Closeable {
|
||||||
private InternalNode node;
|
private InternalNode node;
|
||||||
private Client client;
|
private Client client;
|
||||||
|
@ -439,14 +466,14 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public static class ClientFactory {
|
static class ClientFactory {
|
||||||
|
|
||||||
public Client client(Node node, String clusterName, Random random) {
|
public Client client(Node node, String clusterName, Random random) {
|
||||||
return node.client();
|
return node.client();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public static class TransportClientFactory extends ClientFactory {
|
static class TransportClientFactory extends ClientFactory {
|
||||||
|
|
||||||
private boolean sniff;
|
private boolean sniff;
|
||||||
public static TransportClientFactory NO_SNIFF_CLIENT_FACTORY = new TransportClientFactory(false);
|
public static TransportClientFactory NO_SNIFF_CLIENT_FACTORY = new TransportClientFactory(false);
|
||||||
|
@ -467,7 +494,7 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public class RandomClientFactory extends ClientFactory {
|
class RandomClientFactory extends ClientFactory {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Client client(Node node, String clusterName, Random random) {
|
public Client client(Node node, String clusterName, Random random) {
|
||||||
|
@ -486,7 +513,10 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized void beforeTest(Random random, double transportClientRatio) {
|
/**
|
||||||
|
* This method should be exectued before each test to reset the cluster to it's initial state.
|
||||||
|
*/
|
||||||
|
synchronized void beforeTest(Random random, double transportClientRatio) {
|
||||||
reset(random, true, transportClientRatio);
|
reset(random, true, transportClientRatio);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -521,7 +551,7 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
}
|
}
|
||||||
if (!changed && sharedNodes.size() == nodes.size()) {
|
if (!changed && sharedNodes.size() == nodes.size()) {
|
||||||
logger.debug("Cluster is consistent - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
|
logger.debug("Cluster is consistent - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
|
||||||
if (numNodes() > 0) {
|
if (size() > 0) {
|
||||||
client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
|
client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
|
||||||
}
|
}
|
||||||
return; // we are consistent - return
|
return; // we are consistent - return
|
||||||
|
@ -541,14 +571,17 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
publishNode(nodeAndClient);
|
publishNode(nodeAndClient);
|
||||||
}
|
}
|
||||||
nextNodeId.set(sharedNodesSeeds.length);
|
nextNodeId.set(sharedNodesSeeds.length);
|
||||||
assert numNodes() == sharedNodesSeeds.length;
|
assert size() == sharedNodesSeeds.length;
|
||||||
if (numNodes() > 0) {
|
if (size() > 0) {
|
||||||
client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
|
client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
|
||||||
}
|
}
|
||||||
logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
|
logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized void afterTest() {
|
/**
|
||||||
|
* This method should be executed during tearDown
|
||||||
|
*/
|
||||||
|
synchronized void afterTest() {
|
||||||
wipeDataDirectories();
|
wipeDataDirectories();
|
||||||
resetClients(); /* reset all clients - each test gets it's own client based on the Random instance created above. */
|
resetClients(); /* reset all clients - each test gets it's own client based on the Random instance created above. */
|
||||||
|
|
||||||
|
@ -572,10 +605,16 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a reference to a random nodes {@link ClusterService}
|
||||||
|
*/
|
||||||
public synchronized ClusterService clusterService() {
|
public synchronized ClusterService clusterService() {
|
||||||
return getInstance(ClusterService.class);
|
return getInstance(ClusterService.class);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns an Iterabel to all instances for the given class >T< across all nodes in the cluster.
|
||||||
|
*/
|
||||||
public synchronized <T> Iterable<T> getInstances(Class<T> clazz) {
|
public synchronized <T> Iterable<T> getInstances(Class<T> clazz) {
|
||||||
List<T> instances = new ArrayList<T>(nodes.size());
|
List<T> instances = new ArrayList<T>(nodes.size());
|
||||||
for (NodeAndClient nodeAndClient : nodes.values()) {
|
for (NodeAndClient nodeAndClient : nodes.values()) {
|
||||||
|
@ -584,6 +623,9 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return instances;
|
return instances;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a reference to the given nodes instances of the given class >T<
|
||||||
|
*/
|
||||||
public synchronized <T> T getInstance(Class<T> clazz, final String node) {
|
public synchronized <T> T getInstance(Class<T> clazz, final String node) {
|
||||||
final Predicate<TestCluster.NodeAndClient> predicate;
|
final Predicate<TestCluster.NodeAndClient> predicate;
|
||||||
if (node != null) {
|
if (node != null) {
|
||||||
|
@ -600,6 +642,9 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return getInstanceFromNode(clazz, randomNodeAndClient.node);
|
return getInstanceFromNode(clazz, randomNodeAndClient.node);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a reference to a random nodes instances of the given class >T<
|
||||||
|
*/
|
||||||
public synchronized <T> T getInstance(Class<T> clazz) {
|
public synchronized <T> T getInstance(Class<T> clazz) {
|
||||||
return getInstance(clazz, null);
|
return getInstance(clazz, null);
|
||||||
}
|
}
|
||||||
|
@ -608,10 +653,16 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return node.injector().getInstance(clazz);
|
return node.injector().getInstance(clazz);
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized int numNodes() {
|
/**
|
||||||
|
* Returns the number of nodes in the cluster.
|
||||||
|
*/
|
||||||
|
public synchronized int size() {
|
||||||
return this.nodes.size();
|
return this.nodes.size();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stops a random node in the cluster.
|
||||||
|
*/
|
||||||
public synchronized void stopRandomNode() {
|
public synchronized void stopRandomNode() {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
NodeAndClient nodeAndClient = getRandomNodeAndClient();
|
NodeAndClient nodeAndClient = getRandomNodeAndClient();
|
||||||
|
@ -622,6 +673,10 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stops a random node in the cluster that applies to the given filter or non if the non of the nodes applies to the
|
||||||
|
* filter.
|
||||||
|
*/
|
||||||
public synchronized void stopRandomNode(final Predicate<Settings> filter) {
|
public synchronized void stopRandomNode(final Predicate<Settings> filter) {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
NodeAndClient nodeAndClient = getRandomNodeAndClient(new Predicate<TestCluster.NodeAndClient>() {
|
NodeAndClient nodeAndClient = getRandomNodeAndClient(new Predicate<TestCluster.NodeAndClient>() {
|
||||||
|
@ -638,9 +693,12 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stops the current master node forcefully
|
||||||
|
*/
|
||||||
public synchronized void stopCurrentMasterNode() {
|
public synchronized void stopCurrentMasterNode() {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
assert numNodes() > 0;
|
assert size() > 0;
|
||||||
String masterNodeName = getMasterName();
|
String masterNodeName = getMasterName();
|
||||||
assert nodes.containsKey(masterNodeName);
|
assert nodes.containsKey(masterNodeName);
|
||||||
logger.info("Closing master node [{}] ", masterNodeName);
|
logger.info("Closing master node [{}] ", masterNodeName);
|
||||||
|
@ -648,6 +706,9 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
remove.close();
|
remove.close();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stops the any of the current nodes but not the master node.
|
||||||
|
*/
|
||||||
public void stopRandomNonMasterNode() {
|
public void stopRandomNonMasterNode() {
|
||||||
NodeAndClient nodeAndClient = getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
|
NodeAndClient nodeAndClient = getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
|
||||||
if (nodeAndClient != null) {
|
if (nodeAndClient != null) {
|
||||||
|
@ -656,11 +717,18 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
nodeAndClient.close();
|
nodeAndClient.close();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Restarts a random node in the cluster
|
||||||
|
*/
|
||||||
public void restartRandomNode() throws Exception {
|
public void restartRandomNode() throws Exception {
|
||||||
restartRandomNode(EMPTY_CALLBACK);
|
restartRandomNode(EMPTY_CALLBACK);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Restarts a random node in the cluster and calls the callback during restart.
|
||||||
|
*/
|
||||||
public void restartRandomNode(RestartCallback callback) throws Exception {
|
public void restartRandomNode(RestartCallback callback) throws Exception {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
NodeAndClient nodeAndClient = getRandomNodeAndClient();
|
NodeAndClient nodeAndClient = getRandomNodeAndClient();
|
||||||
|
@ -707,24 +775,38 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
public static final RestartCallback EMPTY_CALLBACK = new RestartCallback() {
|
|
||||||
|
|
||||||
|
private static final RestartCallback EMPTY_CALLBACK = new RestartCallback() {
|
||||||
public Settings onNodeStopped(String node) {
|
public Settings onNodeStopped(String node) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Restarts all nodes in the cluster. It first stops all nodes and then restarts all the nodes again.
|
||||||
|
*/
|
||||||
public void fullRestart() throws Exception {
|
public void fullRestart() throws Exception {
|
||||||
fullRestart(EMPTY_CALLBACK);
|
fullRestart(EMPTY_CALLBACK);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Restarts all nodes in a rolling restart fashion ie. only restarts on node a time.
|
||||||
|
*/
|
||||||
public void rollingRestart() throws Exception {
|
public void rollingRestart() throws Exception {
|
||||||
rollingRestart(EMPTY_CALLBACK);
|
rollingRestart(EMPTY_CALLBACK);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Restarts all nodes in a rolling restart fashion ie. only restarts on node a time.
|
||||||
|
*/
|
||||||
public void rollingRestart(RestartCallback function) throws Exception {
|
public void rollingRestart(RestartCallback function) throws Exception {
|
||||||
restartAllNodes(true, function);
|
restartAllNodes(true, function);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Restarts all nodes in the cluster. It first stops all nodes and then restarts all the nodes again.
|
||||||
|
*/
|
||||||
public void fullRestart(RestartCallback function) throws Exception {
|
public void fullRestart(RestartCallback function) throws Exception {
|
||||||
restartAllNodes(false, function);
|
restartAllNodes(false, function);
|
||||||
}
|
}
|
||||||
|
@ -740,12 +822,12 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized Set<String> allButN(int numNodes) {
|
synchronized Set<String> allButN(int numNodes) {
|
||||||
return nRandomNodes(numNodes() - numNodes);
|
return nRandomNodes(size() - numNodes);
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized Set<String> nRandomNodes(int numNodes) {
|
private synchronized Set<String> nRandomNodes(int numNodes) {
|
||||||
assert numNodes() >= numNodes;
|
assert size() >= numNodes;
|
||||||
return Sets.newHashSet(Iterators.limit(this.nodes.keySet().iterator(), numNodes));
|
return Sets.newHashSet(Iterators.limit(this.nodes.keySet().iterator(), numNodes));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -754,6 +836,9 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
startNode(settingsBuilder().put(settings).put("node.client", true));
|
startNode(settingsBuilder().put(settings).put("node.client", true));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a set of nodes that have at least one shard of the given index.
|
||||||
|
*/
|
||||||
public synchronized Set<String> nodesInclude(String index) {
|
public synchronized Set<String> nodesInclude(String index) {
|
||||||
if (clusterService().state().routingTable().hasIndex(index)) {
|
if (clusterService().state().routingTable().hasIndex(index)) {
|
||||||
List<ShardRouting> allShards = clusterService().state().routingTable().allShards(index);
|
List<ShardRouting> allShards = clusterService().state().routingTable().allShards(index);
|
||||||
|
@ -770,31 +855,23 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
return Collections.emptySet();
|
return Collections.emptySet();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
public synchronized Set<String> nodeExclude(String index) {
|
* Starts a node with default settings and returns it's name.
|
||||||
final Set<String> nodesInclude = nodesInclude(index);
|
*/
|
||||||
return Sets.newHashSet(Iterators.transform(Iterators.filter(nodes.values().iterator(), new Predicate<NodeAndClient>() {
|
|
||||||
@Override
|
|
||||||
public boolean apply(NodeAndClient nodeAndClient) {
|
|
||||||
return !nodesInclude.contains(nodeAndClient.name);
|
|
||||||
}
|
|
||||||
|
|
||||||
}), new Function<NodeAndClient, String>() {
|
|
||||||
@Override
|
|
||||||
public String apply(NodeAndClient nodeAndClient) {
|
|
||||||
return nodeAndClient.name;
|
|
||||||
}
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
|
|
||||||
public String startNode() {
|
public String startNode() {
|
||||||
return startNode(ImmutableSettings.EMPTY);
|
return startNode(ImmutableSettings.EMPTY);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Starts a node with the given settings builder and returns it's name.
|
||||||
|
*/
|
||||||
public String startNode(Settings.Builder settings) {
|
public String startNode(Settings.Builder settings) {
|
||||||
return startNode(settings.build());
|
return startNode(settings.build());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Starts a node with the given settings and returns it's name.
|
||||||
|
*/
|
||||||
public String startNode(Settings settings) {
|
public String startNode(Settings settings) {
|
||||||
NodeAndClient buildNode = buildNode(settings);
|
NodeAndClient buildNode = buildNode(settings);
|
||||||
buildNode.node().start();
|
buildNode.node().start();
|
||||||
|
@ -812,13 +889,6 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void resetAllGateways() throws Exception {
|
|
||||||
Collection<NodeAndClient> values = this.nodes.values();
|
|
||||||
for (NodeAndClient nodeAndClient : values) {
|
|
||||||
getInstanceFromNode(Gateway.class, ((InternalNode) nodeAndClient.node)).reset();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public void closeNonSharedNodes(boolean wipeData) {
|
public void closeNonSharedNodes(boolean wipeData) {
|
||||||
reset(random, wipeData, transportClientRatio);
|
reset(random, wipeData, transportClientRatio);
|
||||||
}
|
}
|
||||||
|
@ -869,6 +939,9 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a predicate that only accepts settings of nodes with one of the given names.
|
||||||
|
*/
|
||||||
public static Predicate<Settings> nameFilter(String... nodeName) {
|
public static Predicate<Settings> nameFilter(String... nodeName) {
|
||||||
return new NodeNamePredicate(new HashSet<String>(Arrays.asList(nodeName)));
|
return new NodeNamePredicate(new HashSet<String>(Arrays.asList(nodeName)));
|
||||||
}
|
}
|
||||||
|
@ -888,23 +961,44 @@ public class TestCluster implements Closeable, Iterable<Client> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* An abstract class that is called during {@link #rollingRestart(org.elasticsearch.test.TestCluster.RestartCallback)}
|
||||||
|
* and / or {@link #fullRestart(org.elasticsearch.test.TestCluster.RestartCallback)} to execute actions at certain
|
||||||
|
* stages of the restart.
|
||||||
|
*/
|
||||||
public static abstract class RestartCallback {
|
public static abstract class RestartCallback {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Executed once the give node name has been stopped.
|
||||||
|
*/
|
||||||
public Settings onNodeStopped(String nodeName) throws Exception {
|
public Settings onNodeStopped(String nodeName) throws Exception {
|
||||||
return ImmutableSettings.EMPTY;
|
return ImmutableSettings.EMPTY;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void doAfterNodes(int numNodes, Client client) throws Exception {
|
/**
|
||||||
|
* Executed for each node before the <tt>n+1</tt> node is restarted. The given client is
|
||||||
|
* an active client to the node that will be restarted next.
|
||||||
|
*/
|
||||||
|
public void doAfterNodes(int n, Client client) throws Exception {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* If this returns <code>true</code> all data for the node with the given node name will be cleared including
|
||||||
|
* gateways and all index data. Returns <code>false</code> by default.
|
||||||
|
*/
|
||||||
public boolean clearData(String nodeName) {
|
public boolean clearData(String nodeName) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* If this returns <code>false</code> the node with the given node name will not be restarted. It will be
|
||||||
|
* closed and removed from the cluster. Returns <code>true</code> by default.
|
||||||
|
*/
|
||||||
public boolean doRestart(String nodeName) {
|
public boolean doRestart(String nodeName) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -225,39 +225,39 @@ public class UpdateTests extends ElasticsearchIntegrationTest {
|
||||||
assertThrows(client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(2).execute(),
|
assertThrows(client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(2).execute(),
|
||||||
VersionConflictEngineException.class);
|
VersionConflictEngineException.class);
|
||||||
|
|
||||||
run(client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(1));
|
client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(1).get();
|
||||||
assertThat(run(client().prepareGet("test", "type", "1")).getVersion(), equalTo(2l));
|
assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(2l));
|
||||||
|
|
||||||
// and again with a higher version..
|
// and again with a higher version..
|
||||||
run(client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v3'").setVersion(2));
|
client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v3'").setVersion(2).get();
|
||||||
|
|
||||||
assertThat(run(client().prepareGet("test", "type", "1")).getVersion(), equalTo(3l));
|
assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(3l));
|
||||||
|
|
||||||
// after delete
|
// after delete
|
||||||
run(client().prepareDelete("test", "type", "1"));
|
client().prepareDelete("test", "type", "1").get();
|
||||||
assertThrows(client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(3).execute(),
|
assertThrows(client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'").setVersion(3).execute(),
|
||||||
DocumentMissingException.class);
|
DocumentMissingException.class);
|
||||||
|
|
||||||
// external versioning
|
// external versioning
|
||||||
run(client().prepareIndex("test", "type", "2").setSource("text", "value").setVersion(10).setVersionType(VersionType.EXTERNAL));
|
client().prepareIndex("test", "type", "2").setSource("text", "value").setVersion(10).setVersionType(VersionType.EXTERNAL).get();
|
||||||
assertThrows(client().prepareUpdate("test", "type", "2").setScript("ctx._source.text = 'v2'").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
|
assertThrows(client().prepareUpdate("test", "type", "2").setScript("ctx._source.text = 'v2'").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
|
||||||
VersionConflictEngineException.class);
|
VersionConflictEngineException.class);
|
||||||
|
|
||||||
run(client().prepareUpdate("test", "type", "2").setScript("ctx._source.text = 'v2'").setVersion(11).setVersionType(VersionType.EXTERNAL));
|
client().prepareUpdate("test", "type", "2").setScript("ctx._source.text = 'v2'").setVersion(11).setVersionType(VersionType.EXTERNAL).get();
|
||||||
|
|
||||||
assertThat(run(client().prepareGet("test", "type", "2")).getVersion(), equalTo(11l));
|
assertThat(client().prepareGet("test", "type", "2").get().getVersion(), equalTo(11l));
|
||||||
|
|
||||||
// upserts - the combination with versions is a bit weird. Test are here to ensure we do not change our behavior unintentionally
|
// upserts - the combination with versions is a bit weird. Test are here to ensure we do not change our behavior unintentionally
|
||||||
|
|
||||||
// With internal versions, tt means "if object is there with version X, update it or explode. If it is not there, index.
|
// With internal versions, tt means "if object is there with version X, update it or explode. If it is not there, index.
|
||||||
run(client().prepareUpdate("test", "type", "3").setScript("ctx._source.text = 'v2'").setVersion(10).setUpsert("{ \"text\": \"v0\" }"));
|
client().prepareUpdate("test", "type", "3").setScript("ctx._source.text = 'v2'").setVersion(10).setUpsert("{ \"text\": \"v0\" }").get();
|
||||||
GetResponse get = get("test", "type", "3");
|
GetResponse get = get("test", "type", "3");
|
||||||
assertThat(get.getVersion(), equalTo(1l));
|
assertThat(get.getVersion(), equalTo(1l));
|
||||||
assertThat((String) get.getSource().get("text"), equalTo("v0"));
|
assertThat((String) get.getSource().get("text"), equalTo("v0"));
|
||||||
|
|
||||||
// With external versions, it means - if object is there with version lower than X, update it or explode. If it is not there, insert with new version.
|
// With external versions, it means - if object is there with version lower than X, update it or explode. If it is not there, insert with new version.
|
||||||
run(client().prepareUpdate("test", "type", "4").setScript("ctx._source.text = 'v2'").
|
client().prepareUpdate("test", "type", "4").setScript("ctx._source.text = 'v2'").
|
||||||
setVersion(10).setVersionType(VersionType.EXTERNAL).setUpsert("{ \"text\": \"v0\" }"));
|
setVersion(10).setVersionType(VersionType.EXTERNAL).setUpsert("{ \"text\": \"v0\" }").get();
|
||||||
get = get("test", "type", "4");
|
get = get("test", "type", "4");
|
||||||
assertThat(get.getVersion(), equalTo(10l));
|
assertThat(get.getVersion(), equalTo(10l));
|
||||||
assertThat((String) get.getSource().get("text"), equalTo("v0"));
|
assertThat((String) get.getSource().get("text"), equalTo("v0"));
|
||||||
|
|
|
@ -23,7 +23,7 @@ public class ConcurrentDocumentOperationTests extends ElasticsearchIntegrationTe
|
||||||
|
|
||||||
logger.info("--> create an index with 1 shard and max replicas based on nodes");
|
logger.info("--> create an index with 1 shard and max replicas based on nodes");
|
||||||
client().admin().indices().prepareCreate("test")
|
client().admin().indices().prepareCreate("test")
|
||||||
.setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", cluster().numNodes()-1))
|
.setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", cluster().size()-1))
|
||||||
.execute().actionGet();
|
.execute().actionGet();
|
||||||
|
|
||||||
logger.info("execute concurrent updates on the same doc");
|
logger.info("execute concurrent updates on the same doc");
|
||||||
|
@ -54,7 +54,7 @@ public class ConcurrentDocumentOperationTests extends ElasticsearchIntegrationTe
|
||||||
|
|
||||||
logger.info("done indexing, check all have the same field value");
|
logger.info("done indexing, check all have the same field value");
|
||||||
Map masterSource = client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap();
|
Map masterSource = client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap();
|
||||||
for (int i = 0; i < (cluster().numNodes() * 5); i++) {
|
for (int i = 0; i < (cluster().size() * 5); i++) {
|
||||||
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource));
|
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue