[TEST] Fix test cluster naming

This commit renames `TestCluster` to `InternalTestCluster` and
`ImmutableTestCluster` to `TestCluster` for consistency. It also
makes `ExternalTestCluster` and `InternalTestCluster` consistent
with respect to their execution environment.

Closes #6510
Author: Simon Willnauer, 2014-06-16 14:44:37 +02:00
Parent: 0f180bd5fd
Commit: 61eac483ed
83 changed files with 2017 additions and 2018 deletions
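
The rename is mechanical on the test side: node-management calls that previously went through `cluster()` now go through `internalCluster()`, and the environment-agnostic accessors that previously went through `immutableCluster()` now go through `cluster()`. The sketch below illustrates a test written against the renamed API; the class name and the specific assertions are hypothetical, while `internalCluster()`, `cluster()`, `ImmutableSettings`, and `ElasticsearchIntegrationTest` come straight from the changes in this commit.

import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.junit.Test;

import static org.hamcrest.Matchers.greaterThan;

// Hypothetical example, not part of this commit.
public class ClusterNamingSketchTest extends ElasticsearchIntegrationTest {

    @Test
    public void testRenamedClusterAccessors() {
        // internalCluster() exposes node management and only works against the JVM-local cluster.
        internalCluster().startNode(ImmutableSettings.builder().put("node.data", false));
        // cluster() is the environment-agnostic view shared with ExternalTestCluster.
        assertThat(cluster().size(), greaterThan(0));
    }
}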


@@ -88,7 +88,7 @@ public class HotThreadsTest extends ElasticsearchIntegrationTest {
try {
assertThat(nodeHotThreads, notNullValue());
Map<String,NodeHotThreads> nodesMap = nodeHotThreads.getNodesMap();
assertThat(nodesMap.size(), equalTo(immutableCluster().size()));
assertThat(nodesMap.size(), equalTo(cluster().size()));
for (NodeHotThreads ht : nodeHotThreads) {
assertNotNull(ht.getHotThreads());
//logger.info(ht.getHotThreads());


@@ -56,17 +56,17 @@ public class ClusterStatsTests extends ElasticsearchIntegrationTest {
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
assertCounts(response.getNodesStats().getCounts(), 1, 0, 0, 1, 0);
cluster().startNode(ImmutableSettings.builder().put("node.data", false));
internalCluster().startNode(ImmutableSettings.builder().put("node.data", false));
waitForNodes(2);
response = client().admin().cluster().prepareClusterStats().get();
assertCounts(response.getNodesStats().getCounts(), 2, 1, 0, 1, 0);
cluster().startNode(ImmutableSettings.builder().put("node.master", false));
internalCluster().startNode(ImmutableSettings.builder().put("node.master", false));
response = client().admin().cluster().prepareClusterStats().get();
waitForNodes(3);
assertCounts(response.getNodesStats().getCounts(), 3, 1, 1, 1, 0);
cluster().startNode(ImmutableSettings.builder().put("node.client", true));
internalCluster().startNode(ImmutableSettings.builder().put("node.client", true));
response = client().admin().cluster().prepareClusterStats().get();
waitForNodes(4);
assertCounts(response.getNodesStats().getCounts(), 4, 1, 1, 1, 1);
@@ -95,7 +95,7 @@ public class ClusterStatsTests extends ElasticsearchIntegrationTest {
assertShardStats(response.getIndicesStats().getShards(), 1, 2, 2, 0.0);
// add another node, replicas should get assigned
cluster().startNode();
internalCluster().startNode();
ensureGreen();
index("test1", "type", "1", "f", "f");
refresh(); // make the doc visible
@@ -127,9 +127,9 @@ public class ClusterStatsTests extends ElasticsearchIntegrationTest {
@Test
public void testValuesSmokeScreen() {
cluster().ensureAtMostNumDataNodes(5);
cluster().ensureAtLeastNumDataNodes(1);
SigarService sigarService = cluster().getInstance(SigarService.class);
internalCluster().ensureAtMostNumDataNodes(5);
internalCluster().ensureAtLeastNumDataNodes(1);
SigarService sigarService = internalCluster().getInstance(SigarService.class);
index("test1", "type", "1", "f", "f");
/*
* Ensure at least one shard is allocated otherwise the FS stats might


@@ -89,7 +89,7 @@ public class BenchmarkIntegrationTest extends ElasticsearchIntegrationTest {
public void beforeBenchmarkIntegrationTests() throws Exception {
waitForTestLatch = null;
waitForQuery = null;
numExecutorNodes = cluster().numBenchNodes();
numExecutorNodes = internalCluster().numBenchNodes();
competitionSettingsMap = new HashMap<>();
logger.info("--> indexing random data");
indices = randomData();
@@ -166,7 +166,7 @@ public class BenchmarkIntegrationTest extends ElasticsearchIntegrationTest {
without doing busy waiting etc. This Script calls the two static latches above and this test
will not work if somebody messes around with them but it's much faster and less resource intensive / hardware
dependent to run massive benchmarks and do busy waiting. */
cluster(); // mark that we need a JVM local cluster!
internalCluster(); // mark that we need a JVM local cluster!
waitForQuery = new CountDownLatch(1);
waitForTestLatch = new CountDownLatch(1);
String className = "BenchmarkIntegrationTest";


@@ -44,7 +44,7 @@ public class BenchmarkNegativeTest extends ElasticsearchIntegrationTest {
@Test(expected = BenchmarkNodeMissingException.class)
public void testSubmitBenchmarkNegative() {
client().bench(BenchmarkTestUtil.randomRequest(
client(), new String[] {INDEX_NAME}, cluster().size(), null)).actionGet();
client(), new String[] {INDEX_NAME}, internalCluster().size(), null)).actionGet();
}
public void testListBenchmarkNegative() {


@@ -526,7 +526,7 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest {
assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
logger.info("--> verify that filter was updated");
AliasMetaData aliasMetaData = cluster().clusterService().state().metaData().aliases().get("alias1").get("test");
AliasMetaData aliasMetaData = internalCluster().clusterService().state().metaData().aliases().get("alias1").get("test");
assertThat(aliasMetaData.getFilter().toString(), equalTo("{\"term\":{\"name\":\"bar\"}}"));
logger.info("--> deleting alias1");


@@ -32,9 +32,9 @@ public class TransportClientTests extends ElasticsearchIntegrationTest {
@Test
public void testPickingUpChangesInDiscoveryNode() {
String nodeName = cluster().startNode(ImmutableSettings.builder().put("node.data", false));
String nodeName = internalCluster().startNode(ImmutableSettings.builder().put("node.data", false));
TransportClient client = (TransportClient) cluster().client(nodeName);
TransportClient client = (TransportClient) internalCluster().client(nodeName);
assertThat(client.connectedNodes().get(0).dataNode(), Matchers.equalTo(false));
}


@@ -58,8 +58,8 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
cluster().startNode(settings);
ClusterService clusterService1 = cluster().getInstance(ClusterService.class);
internalCluster().startNode(settings);
ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class);
final CountDownLatch block = new CountDownLatch(1);
clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
@@ -113,8 +113,8 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
cluster().startNode(settings);
ClusterService clusterService = cluster().getInstance(ClusterService.class);
internalCluster().startNode(settings);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
final AtomicBoolean ackTimeout = new AtomicBoolean(false);
@@ -184,8 +184,8 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
cluster().startNode(settings);
ClusterService clusterService = cluster().getInstance(ClusterService.class);
internalCluster().startNode(settings);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
final AtomicBoolean ackTimeout = new AtomicBoolean(false);
@@ -255,8 +255,8 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
cluster().startNode(settings);
ClusterService clusterService = cluster().getInstance(ClusterService.class);
internalCluster().startNode(settings);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
final AtomicBoolean ackTimeout = new AtomicBoolean(false);
@@ -322,8 +322,8 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
cluster().startNode(settings);
ClusterService clusterService = cluster().getInstance(ClusterService.class);
internalCluster().startNode(settings);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
final AtomicBoolean ackTimeout = new AtomicBoolean(false);
@@ -392,11 +392,11 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
public void testPendingUpdateTask() throws Exception {
Settings zenSettings = settingsBuilder()
.put("discovery.type", "zen").build();
String node_0 = cluster().startNode(zenSettings);
cluster().startNodeClient(zenSettings);
String node_0 = internalCluster().startNode(zenSettings);
internalCluster().startNodeClient(zenSettings);
ClusterService clusterService = cluster().getInstance(ClusterService.class, node_0);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node_0);
final CountDownLatch block1 = new CountDownLatch(1);
final CountDownLatch invoked1 = new CountDownLatch(1);
clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() {
@@ -444,7 +444,7 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
assertTrue(controlSources.isEmpty());
controlSources = new HashSet<>(Arrays.asList("2", "3", "4", "5", "6", "7", "8", "9", "10"));
PendingClusterTasksResponse response = cluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
PendingClusterTasksResponse response = internalCluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
assertThat(response.pendingTasks().size(), equalTo(9));
for (PendingClusterTask task : response) {
assertTrue(controlSources.remove(task.source().string()));
@@ -455,7 +455,7 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
pendingClusterTasks = clusterService.pendingTasks();
assertThat(pendingClusterTasks, empty());
response = cluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
response = internalCluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
assertThat(response.pendingTasks(), empty());
final CountDownLatch block2 = new CountDownLatch(1);
@@ -503,7 +503,7 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
}
assertTrue(controlSources.isEmpty());
response = cluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
response = internalCluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
assertThat(response.pendingTasks().size(), equalTo(4));
controlSources = new HashSet<>(Arrays.asList("2", "3", "4", "5"));
for (PendingClusterTask task : response) {
@@ -524,18 +524,18 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
.put("plugin.types", TestPlugin.class.getName())
.build();
cluster().startNode(settings);
ClusterService clusterService1 = cluster().getInstance(ClusterService.class);
MasterAwareService testService1 = cluster().getInstance(MasterAwareService.class);
internalCluster().startNode(settings);
ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class);
MasterAwareService testService1 = internalCluster().getInstance(MasterAwareService.class);
// the first node should be a master as the minimum required is 1
assertThat(clusterService1.state().nodes().masterNode(), notNullValue());
assertThat(clusterService1.state().nodes().localNodeMaster(), is(true));
assertThat(testService1.master(), is(true));
String node_1 = cluster().startNode(settings);
final ClusterService clusterService2 = cluster().getInstance(ClusterService.class, node_1);
MasterAwareService testService2 = cluster().getInstance(MasterAwareService.class, node_1);
String node_1 = internalCluster().startNode(settings);
final ClusterService clusterService2 = internalCluster().getInstance(ClusterService.class, node_1);
MasterAwareService testService2 = internalCluster().getInstance(MasterAwareService.class, node_1);
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
@@ -544,7 +544,7 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
assertThat(clusterService2.state().nodes().localNodeMaster(), is(false));
assertThat(testService2.master(), is(false));
cluster().stopCurrentMasterNode();
internalCluster().stopCurrentMasterNode();
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("1").execute().actionGet();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
@@ -567,9 +567,9 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
assertThat(testService2.master(), is(false));
String node_2 = cluster().startNode(settings);
clusterService1 = cluster().getInstance(ClusterService.class, node_2);
testService1 = cluster().getInstance(MasterAwareService.class, node_2);
String node_2 = internalCluster().startNode(settings);
clusterService1 = internalCluster().getInstance(ClusterService.class, node_2);
testService1 = internalCluster().getInstance(MasterAwareService.class, node_2);
// make sure both nodes see each other otherwise the masternode below could be null if node 2 is master and node 1 did'r receive the updated cluster state...
assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).setWaitForNodes("2").execute().actionGet().isTimedOut(), is(false));
@@ -594,8 +594,8 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
cluster().startNode(settings);
ClusterService clusterService = cluster().getInstance(ClusterService.class);
internalCluster().startNode(settings);
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
BlockingTask block = new BlockingTask();
clusterService.submitStateUpdateTask("test", Priority.IMMEDIATE, block);
int taskCount = randomIntBetween(5, 20);


@@ -55,14 +55,14 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
.build();
logger.info("--> start first node");
cluster().startNode(settings);
internalCluster().startNode(settings);
logger.info("--> should be blocked, no master...");
ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
logger.info("--> start second node, cluster should be formed");
cluster().startNode(settings);
internalCluster().startNode(settings);
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
@@ -92,7 +92,7 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
}
cluster().stopCurrentMasterNode();
internalCluster().stopCurrentMasterNode();
awaitBusy(new Predicate<Object>() {
public boolean apply(Object obj) {
ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
@@ -103,7 +103,7 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
logger.info("--> starting the previous master node again...");
cluster().startNode(settings);
internalCluster().startNode(settings);
clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes("2").execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
@@ -128,7 +128,7 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
}
cluster().stopRandomNonMasterNode();
internalCluster().stopRandomNonMasterNode();
assertThat(awaitBusy(new Predicate<Object>() {
public boolean apply(Object obj) {
ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
@@ -137,7 +137,7 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
}), equalTo(true));
logger.info("--> starting the previous master node again...");
cluster().startNode(settings);
internalCluster().startNode(settings);
clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setWaitForGreenStatus().execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
@@ -172,8 +172,8 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
.build();
logger.info("--> start first 2 nodes");
cluster().startNode(settings);
cluster().startNode(settings);
internalCluster().startNode(settings);
internalCluster().startNode(settings);
ClusterState state;
@@ -197,8 +197,8 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
assertThat(state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK), equalTo(true));
logger.info("--> start two more nodes");
cluster().startNode(settings);
cluster().startNode(settings);
internalCluster().startNode(settings);
internalCluster().startNode(settings);
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
@@ -223,16 +223,16 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
}
cluster().stopRandomNonMasterNode();
cluster().stopRandomNonMasterNode();
internalCluster().stopRandomNonMasterNode();
internalCluster().stopRandomNonMasterNode();
logger.info("--> verify that there is no master anymore on remaining nodes");
// spin here to wait till the state is set
assertNoMasterBlockOnAllNodes();
logger.info("--> start back the 2 nodes ");
cluster().startNode(settings);
cluster().startNode(settings);
internalCluster().startNode(settings);
internalCluster().startNode(settings);
clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
@@ -262,8 +262,8 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
.build();
logger.info("--> start 2 nodes");
cluster().startNode(settings);
cluster().startNode(settings);
internalCluster().startNode(settings);
internalCluster().startNode(settings);
// wait until second node join the cluster
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
@@ -273,18 +273,18 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
setMinimumMasterNodes(2);
// make sure it has been processed on all nodes (master node spawns a secondary cluster state update task)
for (Client client : cluster()) {
for (Client client : internalCluster()) {
assertThat(client.admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).get().isTimedOut(),
equalTo(false));
}
logger.info("--> stopping a node");
cluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();
logger.info("--> verifying min master node has effect");
assertNoMasterBlockOnAllNodes();
logger.info("--> bringing another node up");
cluster().startNode(settingsBuilder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2).build());
internalCluster().startNode(settingsBuilder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2).build());
clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
}
@@ -293,7 +293,7 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
assertThat(awaitBusy(new Predicate<Object>() {
public boolean apply(Object obj) {
boolean success = true;
for (Client client : cluster()) {
for (Client client : internalCluster()) {
ClusterState state = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
success &= state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK);
if (logger.isDebugEnabled()) {


@@ -57,12 +57,12 @@ public class NoMasterNodeTests extends ElasticsearchIntegrationTest {
TimeValue timeout = TimeValue.timeValueMillis(200);
cluster().startNode(settings);
internalCluster().startNode(settings);
// start a second node, create an index, and then shut it down so we have no master block
cluster().startNode(settings);
internalCluster().startNode(settings);
createIndex("test");
client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
cluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();
assertThat(awaitBusy(new Predicate<Object>() {
public boolean apply(Object o) {
ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
@@ -128,7 +128,7 @@ public class NoMasterNodeTests extends ElasticsearchIntegrationTest {
assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
}
cluster().startNode(settings);
internalCluster().startNode(settings);
client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
}
}


@@ -64,7 +64,7 @@ public class SimpleClusterStateTests extends ElasticsearchIntegrationTest {
@Test
public void testNodes() throws Exception {
ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setNodes(true).get();
assertThat(clusterStateResponse.getState().nodes().nodes().size(), is(immutableCluster().size()));
assertThat(clusterStateResponse.getState().nodes().nodes().size(), is(cluster().size()));
ClusterStateResponse clusterStateResponseFiltered = client().admin().cluster().prepareState().clear().get();
assertThat(clusterStateResponseFiltered.getState().nodes().nodes().size(), is(0));


@@ -41,7 +41,7 @@ public class SimpleDataNodesTests extends ElasticsearchIntegrationTest {
@Test
public void testDataNodes() throws Exception {
cluster().startNode(settingsBuilder().put("node.data", false).build());
internalCluster().startNode(settingsBuilder().put("node.data", false).build());
client().admin().indices().create(createIndexRequest("test")).actionGet();
try {
client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")).timeout(timeValueSeconds(1))).actionGet();
@@ -50,7 +50,7 @@ public class SimpleDataNodesTests extends ElasticsearchIntegrationTest {
// all is well
}
cluster().startNode(settingsBuilder().put("node.data", false).build());
internalCluster().startNode(settingsBuilder().put("node.data", false).build());
assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false));
// still no shard should be allocated
@@ -62,7 +62,7 @@ public class SimpleDataNodesTests extends ElasticsearchIntegrationTest {
}
// now, start a node data, and see that it gets with shards
cluster().startNode(settingsBuilder().put("node.data", true).build());
internalCluster().startNode(settingsBuilder().put("node.data", true).build());
assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false));
IndexResponse indexResponse = client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();


@@ -44,7 +44,7 @@ public class SpecificMasterNodesTests extends ElasticsearchIntegrationTest {
@Test
public void simpleOnlyMasterNodeElection() {
logger.info("--> start data node / non master node");
cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false).put("discovery.initial_state_timeout", "1s"));
internalCluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false).put("discovery.initial_state_timeout", "1s"));
try {
assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
fail("should not be able to find master");
@@ -52,12 +52,12 @@ public class SpecificMasterNodesTests extends ElasticsearchIntegrationTest {
// all is well, no master elected
}
logger.info("--> start master node");
final String masterNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
final String masterNodeName = internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
logger.info("--> stop master node");
cluster().stopCurrentMasterNode();
internalCluster().stopCurrentMasterNode();
try {
assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
@@ -67,15 +67,15 @@ public class SpecificMasterNodesTests extends ElasticsearchIntegrationTest {
}
logger.info("--> start master node");
final String nextMasterEligibleNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
final String nextMasterEligibleNodeName = internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
}
@Test
public void electOnlyBetweenMasterNodes() {
logger.info("--> start data node / non master node");
cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false).put("discovery.initial_state_timeout", "1s"));
internalCluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false).put("discovery.initial_state_timeout", "1s"));
try {
assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
fail("should not be able to find master");
@@ -83,20 +83,20 @@ public class SpecificMasterNodesTests extends ElasticsearchIntegrationTest {
// all is well, no master elected
}
logger.info("--> start master node (1)");
final String masterNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
final String masterNodeName = internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
logger.info("--> start master node (2)");
final String nextMasterEligableNodeName = cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
final String nextMasterEligableNodeName = internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
logger.info("--> closing master node (1)");
cluster().stopCurrentMasterNode();
assertThat(cluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligableNodeName));
assertThat(cluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligableNodeName));
internalCluster().stopCurrentMasterNode();
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligableNodeName));
assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligableNodeName));
}
/**
@@ -106,10 +106,10 @@ public class SpecificMasterNodesTests extends ElasticsearchIntegrationTest {
@Test
public void testCustomDefaultMapping() throws Exception {
logger.info("--> start master node / non data");
cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
logger.info("--> start data node / non master node");
cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false));
internalCluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false));
createIndex("test");
assertAcked(client().admin().indices().preparePutMapping("test").setType("_default_").setSource("_timestamp", "enabled=true"));
@@ -127,10 +127,10 @@ public class SpecificMasterNodesTests extends ElasticsearchIntegrationTest {
@Test
public void testAliasFilterValidation() throws Exception {
logger.info("--> start master node / non data");
cluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
logger.info("--> start data node / non master node");
cluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false));
internalCluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false));
assertAcked(prepareCreate("test").addMapping("type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", \"properties\" : {\"field_a\" : { \"type\" : \"string\" },\"field_b\" :{ \"type\" : \"string\" }}}}}}"));
client().admin().indices().prepareAliases().addAlias("test", "a_test", FilterBuilders.nestedFilter("table_a", FilterBuilders.termFilter("table_a.field_b", "y"))).get();


@@ -39,7 +39,7 @@ public class UpdateSettingsValidationTests extends ElasticsearchIntegrationTest
@Test
public void testUpdateSettingsValidation() throws Exception {
List<String> nodes = cluster().startNodesAsync(
List<String> nodes = internalCluster().startNodesAsync(
settingsBuilder().put("node.data", false).build(),
settingsBuilder().put("node.master", false).build(),
settingsBuilder().put("node.master", false).build()


@@ -57,7 +57,7 @@ public class AckClusterUpdateSettingsTests extends ElasticsearchIntegrationTest
@Override
protected int minimumNumberOfShards() {
return immutableCluster().numDataNodes();
return cluster().numDataNodes();
}
@Override
@@ -106,7 +106,7 @@ public class AckClusterUpdateSettingsTests extends ElasticsearchIntegrationTest
public void testClusterUpdateSettingsNoAcknowledgement() {
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder()
.put("number_of_shards", between(immutableCluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
.put("number_of_shards", between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
.put("number_of_replicas", 0)).get();
ensureGreen();


@@ -179,7 +179,7 @@ public class AckTests extends ElasticsearchIntegrationTest {
public void testClusterRerouteAcknowledgement() throws InterruptedException {
assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder()
.put(indexSettings())
.put(SETTING_NUMBER_OF_SHARDS, between(immutableCluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
.put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
.put(SETTING_NUMBER_OF_REPLICAS, 0)
));
ensureGreen();
@@ -214,7 +214,7 @@ public class AckTests extends ElasticsearchIntegrationTest {
public void testClusterRerouteNoAcknowledgement() throws InterruptedException {
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder()
.put(SETTING_NUMBER_OF_SHARDS, between(immutableCluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
.put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
.put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
ensureGreen();
@@ -228,7 +228,7 @@ public class AckTests extends ElasticsearchIntegrationTest {
public void testClusterRerouteAcknowledgementDryRun() throws InterruptedException {
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder()
.put(SETTING_NUMBER_OF_SHARDS, between(immutableCluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
.put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
.put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
ensureGreen();
@@ -261,7 +261,7 @@ public class AckTests extends ElasticsearchIntegrationTest {
public void testClusterRerouteNoAcknowledgementDryRun() throws InterruptedException {
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder()
.put(SETTING_NUMBER_OF_SHARDS, between(immutableCluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
.put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
.put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
ensureGreen();


@@ -64,7 +64,7 @@ public class AwarenessAllocationTests extends ElasticsearchIntegrationTest {
logger.info("--> starting 2 nodes on the same rack");
cluster().startNodesAsync(2, ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_1").build()).get();
internalCluster().startNodesAsync(2, ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_1").build()).get();
createIndex("test1");
createIndex("test2");
@@ -77,7 +77,7 @@ public class AwarenessAllocationTests extends ElasticsearchIntegrationTest {
ensureGreen();
logger.info("--> starting 1 node on a different rack");
final String node3 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_2").build());
final String node3 = internalCluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_2").build());
// On slow machines the initial relocation might be delayed
assertThat(awaitBusy(new Predicate<Object>() {
@@ -115,7 +115,7 @@ public class AwarenessAllocationTests extends ElasticsearchIntegrationTest {
.build();
logger.info("--> starting 4 nodes on different zones");
List<String> nodes = cluster().startNodesAsync(
List<String> nodes = internalCluster().startNodesAsync(
ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "a").build(),
ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build(),
ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build(),
@@ -155,7 +155,7 @@ public class AwarenessAllocationTests extends ElasticsearchIntegrationTest {
.build();
logger.info("--> starting 2 nodes on zones 'a' & 'b'");
List<String> nodes = cluster().startNodesAsync(
List<String> nodes = internalCluster().startNodesAsync(
ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "a").build(),
ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build()
).get();
@@ -180,7 +180,7 @@ public class AwarenessAllocationTests extends ElasticsearchIntegrationTest {
assertThat(counts.get(B_0), equalTo(5));
logger.info("--> starting another node in zone 'b'");
String B_1 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
String B_1 = internalCluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
client().admin().cluster().prepareReroute().get();
@@ -202,7 +202,7 @@ public class AwarenessAllocationTests extends ElasticsearchIntegrationTest {
assertThat(counts.get(B_0), equalTo(3));
assertThat(counts.get(B_1), equalTo(2));
String noZoneNode = cluster().startNode();
String noZoneNode = internalCluster().startNode();
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
client().admin().cluster().prepareReroute().get();


@@ -76,7 +76,7 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest {
}
private void rerouteWithCommands(Settings commonSettings) throws Exception {
List<String> nodesIds = cluster().startNodesAsync(2, commonSettings).get();
List<String> nodesIds = internalCluster().startNodesAsync(2, commonSettings).get();
final String node_1 = nodesIds.get(0);
final String node_2 = nodesIds.get(1);
@@ -157,9 +157,9 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest {
private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exception {
logger.info("--> starting 2 nodes");
String node_1 = cluster().startNode(commonSettings);
cluster().startNode(commonSettings);
assertThat(immutableCluster().size(), equalTo(2));
String node_1 = internalCluster().startNode(commonSettings);
internalCluster().startNode(commonSettings);
assertThat(cluster().size(), equalTo(2));
ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
assertThat(healthResponse.isTimedOut(), equalTo(false));
@@ -190,17 +190,17 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest {
client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).execute().actionGet();
logger.info("--> closing all nodes");
File[] shardLocation = cluster().getInstance(NodeEnvironment.class, node_1).shardLocations(new ShardId("test", 0));
File[] shardLocation = internalCluster().getInstance(NodeEnvironment.class, node_1).shardLocations(new ShardId("test", 0));
assertThat(FileSystemUtils.exists(shardLocation), equalTo(true)); // make sure the data is there!
cluster().closeNonSharedNodes(false); // don't wipe data directories the index needs to be there!
internalCluster().closeNonSharedNodes(false); // don't wipe data directories the index needs to be there!
logger.info("--> deleting the shard data [{}] ", Arrays.toString(shardLocation));
assertThat(FileSystemUtils.exists(shardLocation), equalTo(true)); // verify again after cluster was shut down
assertThat(FileSystemUtils.deleteRecursively(shardLocation), equalTo(true));
logger.info("--> starting nodes back, will not allocate the shard since it has no data, but the index will be there");
node_1 = cluster().startNode(commonSettings);
cluster().startNode(commonSettings);
node_1 = internalCluster().startNode(commonSettings);
internalCluster().startNode(commonSettings);
// wait a bit for the cluster to realize that the shard is not there...
// TODO can we get around this? the cluster is RED, so what do we wait for?
client().admin().cluster().prepareReroute().get();
@@ -228,9 +228,9 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest {
Settings commonSettings = settingsBuilder().build();
logger.info("--> starting a node");
String node_1 = cluster().startNode(commonSettings);
String node_1 = internalCluster().startNode(commonSettings);
assertThat(immutableCluster().size(), equalTo(1));
assertThat(cluster().size(), equalTo(1));
ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("1").execute().actionGet();
assertThat(healthResponse.isTimedOut(), equalTo(false));
@@ -248,8 +248,8 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest {
client().admin().cluster().prepareUpdateSettings().setTransientSettings(newSettings).execute().actionGet();
logger.info("--> starting a second node");
String node_2 = cluster().startNode(commonSettings);
assertThat(immutableCluster().size(), equalTo(2));
String node_2 = internalCluster().startNode(commonSettings);
assertThat(cluster().size(), equalTo(2));
healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
assertThat(healthResponse.isTimedOut(), equalTo(false));


@@ -45,10 +45,10 @@ public class FilteringAllocationTests extends ElasticsearchIntegrationTest {
@Test
public void testDecommissionNodeNoReplicas() throws Exception {
logger.info("--> starting 2 nodes");
List<String> nodesIds = cluster().startNodesAsync(2).get();
List<String> nodesIds = internalCluster().startNodesAsync(2).get();
final String node_0 = nodesIds.get(0);
final String node_1 = nodesIds.get(1);
assertThat(immutableCluster().size(), equalTo(2));
assertThat(cluster().size(), equalTo(2));
logger.info("--> creating an index with no replicas");
client().admin().indices().prepareCreate("test")
@@ -85,10 +85,10 @@ public class FilteringAllocationTests extends ElasticsearchIntegrationTest {
@Test
public void testDisablingAllocationFiltering() throws Exception {
logger.info("--> starting 2 nodes");
List<String> nodesIds = cluster().startNodesAsync(2).get();
List<String> nodesIds = internalCluster().startNodesAsync(2).get();
final String node_0 = nodesIds.get(0);
final String node_1 = nodesIds.get(1);
assertThat(immutableCluster().size(), equalTo(2));
assertThat(cluster().size(), equalTo(2));
logger.info("--> creating an index with no replicas");
client().admin().indices().prepareCreate("test")


@@ -57,11 +57,11 @@ public class ShardsAllocatorModuleTests extends ElasticsearchIntegrationTest {
}
private void assertAllocatorInstance(Settings settings, Class<? extends ShardsAllocator> clazz) {
while (immutableCluster().size() != 0) {
cluster().stopRandomDataNode();
while (cluster().size() != 0) {
internalCluster().stopRandomDataNode();
}
cluster().startNode(settings);
ShardsAllocator instance = cluster().getInstance(ShardsAllocator.class);
internalCluster().startNode(settings);
ShardsAllocator instance = internalCluster().getInstance(ShardsAllocator.class);
assertThat(instance, instanceOf(clazz));
}
}


@@ -110,7 +110,7 @@ public class ClusterSettingsTests extends ElasticsearchIntegrationTest {
@Test
public void testUpdateDiscoveryPublishTimeout() {
DiscoverySettings discoverySettings = cluster().getInstance(DiscoverySettings.class);
DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class);
assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.DEFAULT_PUBLISH_TIMEOUT));


@@ -55,7 +55,7 @@ public class DiscoveryWithNetworkFailuresTests extends ElasticsearchIntegrationTest {
.put("discovery.zen.fd.ping_timeout", "1s") // <-- for hitting simulated network failures quickly
.put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName())
.build();
List<String>nodes = cluster().startNodesAsync(3, settings).get();
List<String>nodes = internalCluster().startNodesAsync(3, settings).get();
// Wait until a green status has been reaches and 3 nodes are part of the cluster
List<String> nodesList = Arrays.asList(nodes.toArray(new String[3]));
@@ -68,7 +68,7 @@ public class DiscoveryWithNetworkFailuresTests extends ElasticsearchIntegrationTest {
// Figure out what is the elected master node
DiscoveryNode masterDiscoNode = null;
for (String node : nodesList) {
ClusterState state = cluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
ClusterState state = internalCluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
assertThat(state.nodes().size(), equalTo(3));
if (masterDiscoNode == null) {
masterDiscoNode = state.nodes().masterNode();
@@ -78,7 +78,7 @@ public class DiscoveryWithNetworkFailuresTests extends ElasticsearchIntegrationTest {
}
assert masterDiscoNode != null;
logger.info("---> legit elected master node=" + masterDiscoNode);
final Client masterClient = cluster().masterClient();
final Client masterClient = internalCluster().masterClient();
// Everything is stable now, it is now time to simulate evil...
@@ -105,7 +105,7 @@ public class DiscoveryWithNetworkFailuresTests extends ElasticsearchIntegrationTest {
// The unlucky node must report *no* master node, since it can't connect to master and in fact it should
// continuously ping until network failures have been resolved.
Client isolatedNodeClient = cluster().client(unluckyNode);
Client isolatedNodeClient = internalCluster().client(unluckyNode);
ClusterState localClusterState = isolatedNodeClient.admin().cluster().prepareState().setLocal(true).get().getState();
DiscoveryNodes localDiscoveryNodes = localClusterState.nodes();
assertThat(localDiscoveryNodes.masterNode(), nullValue());
@@ -124,7 +124,7 @@ public class DiscoveryWithNetworkFailuresTests extends ElasticsearchIntegrationTest {
assertThat(clusterHealthResponse.isTimedOut(), is(false));
for (String node : nodesList) {
ClusterState state = cluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
ClusterState state = internalCluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
assertThat(state.nodes().size(), equalTo(3));
// The elected master shouldn't have changed, since the unlucky node never could have elected himself as
// master since m_m_n of 2 could never be satisfied.
@@ -133,13 +133,13 @@ public class DiscoveryWithNetworkFailuresTests extends ElasticsearchIntegrationTest {
}
private void addFailToSendNoConnectRule(String fromNode, String toNode) {
TransportService mockTransportService = cluster().getInstance(TransportService.class, fromNode);
((MockTransportService) mockTransportService).addFailToSendNoConnectRule(cluster().getInstance(Discovery.class, toNode).localNode());
TransportService mockTransportService = internalCluster().getInstance(TransportService.class, fromNode);
((MockTransportService) mockTransportService).addFailToSendNoConnectRule(internalCluster().getInstance(Discovery.class, toNode).localNode());
}
private void clearNoConnectRule(String fromNode, String toNode) {
TransportService mockTransportService = cluster().getInstance(TransportService.class, fromNode);
((MockTransportService) mockTransportService).clearRule(cluster().getInstance(Discovery.class, toNode).localNode());
TransportService mockTransportService = internalCluster().getInstance(TransportService.class, fromNode);
((MockTransportService) mockTransportService).clearRule(internalCluster().getInstance(Discovery.class, toNode).localNode());
}
}


@@ -49,14 +49,14 @@ public class ZenUnicastDiscoveryTestsMinimumMasterNodes extends ElasticsearchIntegrationTest {
.put("transport.tcp.port", "25400-25500") // Need to use custom tcp port range otherwise we collide with the shared cluster
.build();
List<String> nodes = cluster().startNodesAsync(3, settings).get();
List<String> nodes = internalCluster().startNodesAsync(3, settings).get();
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
DiscoveryNode masterDiscoNode = null;
for (String node : nodes.toArray(new String[3])) {
ClusterState state = cluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
ClusterState state = internalCluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
assertThat(state.nodes().size(), equalTo(3));
if (masterDiscoNode == null) {
masterDiscoNode = state.nodes().masterNode();


@@ -56,14 +56,14 @@ public class ZenUnicastDiscoveryTestsSpecificNodes extends ElasticsearchIntegrationTest {
.put("discovery.zen.ping.unicast.hosts", "localhost:15300,localhost:15301,localhost:15302")
.put("transport.tcp.port", "15300-15400")
.build();
List<String> nodes = cluster().startNodesAsync(3, settings).get();
List<String> nodes = internalCluster().startNodesAsync(3, settings).get();
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
DiscoveryNode masterDiscoNode = null;
for (String node : nodes.toArray(new String[3])) {
ClusterState state = cluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
ClusterState state = internalCluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
assertThat(state.nodes().size(), equalTo(3));
if (masterDiscoNode == null) {
masterDiscoNode = state.nodes().masterNode();


@@ -348,7 +348,7 @@ public class BulkTests extends ElasticsearchIntegrationTest {
int replica = randomInt(2);
cluster().ensureAtLeastNumDataNodes(1 + replica);
internalCluster().ensureAtLeastNumDataNodes(1 + replica);
assertAcked(prepareCreate("test").setSettings(
ImmutableSettings.builder()


@@ -52,7 +52,7 @@ import static org.hamcrest.Matchers.nullValue;
public class DocumentActionsTests extends ElasticsearchIntegrationTest {
protected void createIndex() {
immutableCluster().wipeIndices(getConcreteIndexName());
cluster().wipeIndices(getConcreteIndexName());
createIndex(getConcreteIndexName());
}


@@ -37,8 +37,8 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.gateway.Gateway;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.TestCluster;
import org.elasticsearch.test.TestCluster.RestartCallback;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.InternalTestCluster.RestartCallback;
import org.junit.Test;
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
@@ -60,7 +60,7 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
public void testMappingMetaDataParsed() throws Exception {
logger.info("--> starting 1 nodes");
cluster().startNode(settingsBuilder().put("gateway.type", "local"));
internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
logger.info("--> creating test index, with meta routing");
client().admin().indices().prepareCreate("test")
@@ -80,7 +80,7 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
assertThat(mappingMd.routing().required(), equalTo(true));
logger.info("--> restarting nodes...");
cluster().fullRestart();
internalCluster().fullRestart();
logger.info("--> waiting for yellow status");
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(5).setWaitForYellowStatus().execute().actionGet();
@@ -99,7 +99,7 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
public void testSimpleOpenClose() throws Exception {
logger.info("--> starting 2 nodes");
cluster().startNodesAsync(2, settingsBuilder().put("gateway.type", "local").build()).get();
internalCluster().startNodesAsync(2, settingsBuilder().put("gateway.type", "local").build()).get();
logger.info("--> creating test index");
createIndex("test");
@@ -169,7 +169,7 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
logger.info("--> restarting nodes...");
cluster().fullRestart();
internalCluster().fullRestart();
logger.info("--> waiting for two nodes and green status");
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
@@ -211,16 +211,16 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
logger.info("--> cleaning nodes");
logger.info("--> starting 1 master node non data");
cluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").build());
internalCluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").build());
logger.info("--> create an index");
client().admin().indices().prepareCreate("test").execute().actionGet();
logger.info("--> closing master node");
cluster().closeNonSharedNodes(false);
internalCluster().closeNonSharedNodes(false);
logger.info("--> starting 1 master node non data again");
cluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").build());
internalCluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").build());
logger.info("--> waiting for test index to be created");
ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setIndices("test").execute().actionGet();
@@ -236,8 +236,8 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
logger.info("--> cleaning nodes");
logger.info("--> starting 1 master node non data");
cluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").build());
cluster().startNode(settingsBuilder().put("node.master", false).put("gateway.type", "local").build());
internalCluster().startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").build());
internalCluster().startNode(settingsBuilder().put("node.master", false).put("gateway.type", "local").build());
logger.info("--> create an index");
client().admin().indices().prepareCreate("test").execute().actionGet();
@@ -254,8 +254,8 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
logger.info("--> cleaning nodes");
logger.info("--> starting 2 nodes");
cluster().startNode(settingsBuilder().put("gateway.type", "local").build());
cluster().startNode(settingsBuilder().put("gateway.type", "local").build());
internalCluster().startNode(settingsBuilder().put("gateway.type", "local").build());
internalCluster().startNode(settingsBuilder().put("gateway.type", "local").build());
logger.info("--> indexing a simple document");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
@ -297,8 +297,8 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
.put("gateway.type", "local").put("gateway.local.auto_import_dangled", "yes")
.build();
logger.info("--> starting two nodes");
final String node_1 = cluster().startNode(settings);
cluster().startNode(settings);
final String node_1 = internalCluster().startNode(settings);
internalCluster().startNode(settings);
logger.info("--> indexing a simple document");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
@ -314,8 +314,8 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
logger.info("--> restarting the nodes");
final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
cluster().fullRestart(new RestartCallback() {
final Gateway gateway1 = internalCluster().getInstance(Gateway.class, node_1);
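// getInstance(Class, nodeName) resolves the binding from that particular node's injector - here the Gateway service running on node_1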
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
if (node_1.equals(nodeName)) {
@ -356,8 +356,8 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
logger.info("--> starting two nodes");
final String node_1 = cluster().startNode(settings);
cluster().startNode(settings);
final String node_1 = internalCluster().startNode(settings);
internalCluster().startNode(settings);
logger.info("--> indexing a simple document");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
@ -373,8 +373,8 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
logger.info("--> restarting the nodes");
final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
cluster().fullRestart(new RestartCallback() {
final Gateway gateway1 = internalCluster().getInstance(Gateway.class, node_1);
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
if (node_1.equals(nodeName)) {
@ -421,8 +421,8 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
.put("gateway.type", "local").put("gateway.local.auto_import_dangled", "no")
.build();
logger.info("--> starting two nodes");
final String node_1 = cluster().startNode(settings);
cluster().startNode(settings);
final String node_1 = internalCluster().startNode(settings);
internalCluster().startNode(settings);
logger.info("--> indexing a simple document");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
@ -438,8 +438,8 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
logger.info("--> restarting the nodes");
final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
cluster().fullRestart(new RestartCallback() {
final Gateway gateway1 = internalCluster().getInstance(Gateway.class, node_1);
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
@ -463,7 +463,7 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(false));
logger.info("--> restart start the nodes, but make sure we do recovery only after we have 2 nodes in the cluster");
cluster().fullRestart(new RestartCallback() {
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
return settingsBuilder().put("gateway.recover_after_nodes", 2).build();
@ -487,8 +487,8 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
.build();
logger.info("--> starting two nodes");
final String node_1 = cluster().startNode(settings);
cluster().startNode(settings);
final String node_1 = internalCluster().startNode(settings);
internalCluster().startNode(settings);
logger.info("--> indexing a simple document");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
@ -503,8 +503,8 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
}
logger.info("--> restarting the nodes");
final Gateway gateway1 = cluster().getInstance(Gateway.class, node_1);
cluster().fullRestart(new RestartCallback() {
final Gateway gateway1 = internalCluster().getInstance(Gateway.class, node_1);
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
@ -524,7 +524,7 @@ public class LocalGatewayIndexStateTests extends ElasticsearchIntegrationTest {
assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(false));
logger.info("--> close the first node, so we remain with the second that has the dangling index");
cluster().stopRandomNode(TestCluster.nameFilter(node_1));
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_1));
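// nameFilter(...) turns the node name into a predicate, so this "random" stop deterministically picks node_1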
logger.info("--> index a different doc");
client().prepareIndex("test", "type1", "2").setSource("field1", "value2").setRefresh(true).execute().actionGet();

View File

@ -28,7 +28,7 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.TestCluster.RestartCallback;
import org.elasticsearch.test.InternalTestCluster.RestartCallback;
import org.junit.Test;
import java.util.concurrent.TimeUnit;
@ -57,7 +57,7 @@ public class QuorumLocalGatewayTests extends ElasticsearchIntegrationTest {
@Slow
public void testChangeInitialShardsRecovery() throws Exception {
logger.info("--> starting 3 nodes");
final String[] nodes = cluster().startNodesAsync(3, settingsBuilder().put("gateway.type", "local").build()).get().toArray(new String[0]);
final String[] nodes = internalCluster().startNodesAsync(3, settingsBuilder().put("gateway.type", "local").build()).get().toArray(new String[0]);
createIndex("test");
ensureGreen();
@ -77,7 +77,7 @@ public class QuorumLocalGatewayTests extends ElasticsearchIntegrationTest {
final String nodeToRemove = nodes[between(0,2)];
logger.info("--> restarting 1 nodes -- kill 2");
cluster().fullRestart(new RestartCallback() {
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
return settingsBuilder().put("gateway.type", "local").build();
@ -97,10 +97,10 @@ public class QuorumLocalGatewayTests extends ElasticsearchIntegrationTest {
assertThat(awaitBusy(new Predicate<Object>() {
@Override
public boolean apply(Object input) {
ClusterStateResponse clusterStateResponse = cluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
return clusterStateResponse.getState() != null && clusterStateResponse.getState().routingTable().index("test") != null;
}}), equalTo(true)); // wait until we get a cluster state - could be null if we are quick enough.
final ClusterStateResponse clusterStateResponse = cluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
final ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
assertThat(clusterStateResponse.getState(), notNullValue());
assertThat(clusterStateResponse.getState().routingTable().index("test"), notNullValue());
assertThat(clusterStateResponse.getState().routingTable().index("test").allPrimaryShardsActive(), is(false));
@ -123,7 +123,7 @@ public class QuorumLocalGatewayTests extends ElasticsearchIntegrationTest {
public void testQuorumRecovery() throws Exception {
logger.info("--> starting 3 nodes");
cluster().startNodesAsync(3, settingsBuilder().put("gateway.type", "local").build()).get();
internalCluster().startNodesAsync(3, settingsBuilder().put("gateway.type", "local").build()).get();
// we are shutting down nodes - make sure we don't end up with 2 clusters if the test uses the network transport
setMinimumMasterNodes(2);
@ -143,7 +143,7 @@ public class QuorumLocalGatewayTests extends ElasticsearchIntegrationTest {
assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2l);
}
logger.info("--> restart all nodes");
cluster().fullRestart(new RestartCallback() {
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
return null;

View File

@ -34,7 +34,7 @@ import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.TestCluster.RestartCallback;
import org.elasticsearch.test.InternalTestCluster.RestartCallback;
import org.elasticsearch.test.store.MockDirectoryHelper;
import org.junit.Test;
@ -63,7 +63,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
@Slow
public void testX() throws Exception {
cluster().startNode(settingsBuilder().build());
internalCluster().startNode(settingsBuilder().build());
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
.startObject("properties").startObject("appAccountIds").field("type", "string").endObject().endObject()
@ -88,7 +88,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
refresh();
assertHitCount(client().prepareCount().setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
cluster().fullRestart();
internalCluster().fullRestart();
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureYellow();
@ -96,7 +96,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
client().admin().indices().prepareRefresh().execute().actionGet();
assertHitCount(client().prepareCount().setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
cluster().fullRestart();
internalCluster().fullRestart();
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureYellow();
@ -109,7 +109,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
@Slow
public void testSingleNodeNoFlush() throws Exception {
cluster().startNode(settingsBuilder().build());
internalCluster().startNode(settingsBuilder().build());
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
.startObject("properties").startObject("field").field("type", "string").endObject().startObject("num").field("type", "integer").endObject().endObject()
@ -166,7 +166,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
logger.info("Ensure all primaries have been started");
ensureYellow();
}
cluster().fullRestart();
internalCluster().fullRestart();
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureYellow();
@ -178,7 +178,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
assertHitCount(client().prepareCount().setQuery(termQuery("num", 179)).get(), value1Docs);
}
cluster().fullRestart();
internalCluster().fullRestart();
logger.info("Running Cluster Health (wait for the shards to startup)");
@ -197,7 +197,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
@Slow
public void testSingleNodeWithFlush() throws Exception {
cluster().startNode(settingsBuilder().build());
internalCluster().startNode(settingsBuilder().build());
client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
flush();
client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
@ -205,7 +205,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
cluster().fullRestart();
internalCluster().fullRestart();
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureYellow();
@ -214,7 +214,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
}
cluster().fullRestart();
internalCluster().fullRestart();
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureYellow();
@ -228,8 +228,8 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
@Slow
public void testTwoNodeFirstNodeCleared() throws Exception {
final String firstNode = cluster().startNode(settingsBuilder().build());
cluster().startNode(settingsBuilder().build());
final String firstNode = internalCluster().startNode(settingsBuilder().build());
internalCluster().startNode(settingsBuilder().build());
client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
flush();
@ -243,7 +243,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
}
cluster().fullRestart(new RestartCallback() {
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
return settingsBuilder().put("gateway.recover_after_nodes", 2).build();
@ -268,7 +268,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
@Slow
public void testLatestVersionLoaded() throws Exception {
// clean two nodes
cluster().startNodesAsync(2, settingsBuilder().put("gateway.recover_after_nodes", 2).build()).get();
internalCluster().startNodesAsync(2, settingsBuilder().put("gateway.recover_after_nodes", 2).build()).get();
client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
client().admin().indices().prepareFlush().execute().actionGet();
@ -286,7 +286,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
assertThat(metaDataUuid, not(equalTo("_na_")));
logger.info("--> closing first node, and indexing more data to the second node");
cluster().fullRestart(new RestartCallback() {
internalCluster().fullRestart(new RestartCallback() {
@Override
public void doAfterNodes(int numNodes, Client client) throws Exception {
@ -344,7 +344,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
.put(MockDirectoryHelper.CRASH_INDEX, false)
.put(BalancedShardsAllocator.SETTING_THRESHOLD, 1.1f); // use less aggressive settings
cluster().startNodesAsync(4, settings.build()).get();
internalCluster().startNodesAsync(4, settings.build()).get();
logger.info("--> indexing docs");
for (int i = 0; i < 1000; i++) {
@ -361,7 +361,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
logger.info("--> shutting down the nodes");
// Disable allocations while we are closing nodes
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)).execute().actionGet();
cluster().fullRestart();
internalCluster().fullRestart();
logger.info("Running Cluster Health");
ensureGreen();
@ -369,7 +369,7 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
logger.info("--> shutting down the nodes");
// Disable allocations while we are closing nodes
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)).execute().actionGet();
cluster().fullRestart();
internalCluster().fullRestart();
logger.info("Running Cluster Health");
@ -393,15 +393,15 @@ public class SimpleRecoveryLocalGatewayTests extends ElasticsearchIntegrationTes
public void testRecoveryDifferentNodeOrderStartup() throws Exception {
// we need different data paths so we make sure we start the second node fresh
final String node_1 = cluster().startNode(settingsBuilder().put("path.data", "data/data1").build());
final String node_1 = internalCluster().startNode(settingsBuilder().put("path.data", "data/data1").build());
client().prepareIndex("test", "type1", "1").setSource("field", "value").execute().actionGet();
cluster().startNode(settingsBuilder().put("path.data", "data/data2").build());
internalCluster().startNode(settingsBuilder().put("path.data", "data/data2").build());
ensureGreen();
cluster().fullRestart(new RestartCallback() {
internalCluster().fullRestart(new RestartCallback() {
@Override
public boolean doRestart(String nodeName) {

View File

@ -55,8 +55,8 @@ public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest {
}
public Client startNode(Settings.Builder settings) {
String name = cluster().startNode(settings);
return cluster().client(name);
String name = internalCluster().startNode(settings);
return internalCluster().client(name);
}
@Test

View File

@ -56,7 +56,7 @@ public class SuggestStatsTests extends ElasticsearchIntegrationTest {
public void testSimpleStats() throws Exception {
// clear all stats first
client().admin().indices().prepareStats().clear().execute().actionGet();
final int numNodes = immutableCluster().numDataNodes();
final int numNodes = cluster().numDataNodes();
assertThat(numNodes, greaterThanOrEqualTo(2));
final int shardsIdx1 = randomIntBetween(1, 10); // we make sure each node gets at least a single shard...
final int shardsIdx2 = Math.max(numNodes - shardsIdx1, randomIntBetween(1, 10));
@ -85,15 +85,15 @@ public class SuggestStatsTests extends ElasticsearchIntegrationTest {
long startTime = System.currentTimeMillis();
for (int i = 0; i < suggestAllIdx; i++) {
SuggestResponse suggestResponse = addSuggestions(cluster().clientNodeClient().prepareSuggest(), i).get();
SuggestResponse suggestResponse = addSuggestions(internalCluster().clientNodeClient().prepareSuggest(), i).get();
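// clientNodeClient() should hand back a Client bound to a dedicated client node (no data, not master eligible), so the suggest requests get routed to the data nodes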
assertAllSuccessful(suggestResponse);
}
for (int i = 0; i < suggestIdx1; i++) {
SuggestResponse suggestResponse = addSuggestions(cluster().clientNodeClient().prepareSuggest("test1"), i).get();
SuggestResponse suggestResponse = addSuggestions(internalCluster().clientNodeClient().prepareSuggest("test1"), i).get();
assertAllSuccessful(suggestResponse);
}
for (int i = 0; i < suggestIdx2; i++) {
SuggestResponse suggestResponse = addSuggestions(cluster().clientNodeClient().prepareSuggest("test2"), i).get();
SuggestResponse suggestResponse = addSuggestions(internalCluster().clientNodeClient().prepareSuggest("test2"), i).get();
assertAllSuccessful(suggestResponse);
}
long endTime = System.currentTimeMillis();

View File

@ -86,7 +86,7 @@ public class IndexActionTests extends ElasticsearchIntegrationTest {
if (firstError != null) {
fail(firstError.getMessage());
}
cluster().wipeIndices("test");
internalCluster().wipeIndices("test");
}
}

View File

@ -36,7 +36,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.TestCluster;
import org.elasticsearch.test.InternalTestCluster;
import org.junit.Test;
import java.util.Set;
@ -78,7 +78,7 @@ public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
// start one server
logger.info("Starting sever1");
final String server_1 = cluster().startNode(settings);
final String server_1 = internalCluster().startNode(settings);
final String node1 = getLocalNodeId(server_1);
logger.info("Creating index [test]");
@ -97,7 +97,7 @@ public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
logger.info("Starting server2");
// start another server
String server_2 = cluster().startNode(settings);
String server_2 = internalCluster().startNode(settings);
// first wait for 2 nodes in the cluster
logger.info("Running Cluster Health");
@ -133,7 +133,7 @@ public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
logger.info("Starting server3");
// start another server
String server_3 = cluster().startNode(settings);
String server_3 = internalCluster().startNode(settings);
// first wait for 3 nodes in the cluster
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3")).actionGet();
@ -178,7 +178,7 @@ public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
logger.info("Closing server1");
// kill the first server
cluster().stopRandomNode(TestCluster.nameFilter(server_1));
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(server_1));
// verify health
logger.info("Running Cluster Health");
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
@ -224,7 +224,7 @@ public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
}
private String getLocalNodeId(String name) {
Discovery discovery = cluster().getInstance(Discovery.class, name);
Discovery discovery = internalCluster().getInstance(Discovery.class, name);
String nodeId = discovery.localNode().getId();
assertThat(nodeId, not(nullValue()));
return nodeId;
@ -243,7 +243,7 @@ public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
// start one server
logger.info("Starting server1");
final String server_1 = cluster().startNode(settings);
final String server_1 = internalCluster().startNode(settings);
final String node1 = getLocalNodeId(server_1);
@ -267,7 +267,7 @@ public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
// start another server
logger.info("Starting server2");
final String server_2 = cluster().startNode(settings);
final String server_2 = internalCluster().startNode(settings);
// first wait for 2 nodes in the cluster
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
@ -301,7 +301,7 @@ public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
// start another server
logger.info("Starting server3");
final String server_3 = cluster().startNode();
final String server_3 = internalCluster().startNode();
// first wait for 3 nodes in the cluster
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3")).actionGet();
@ -342,7 +342,7 @@ public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
logger.info("Closing server1");
// kill the first server
cluster().stopRandomNode(TestCluster.nameFilter(server_1));
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(server_1));
logger.info("Running Cluster Health");
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();

View File

@ -50,10 +50,10 @@ public class IndicesLifecycleListenerTests extends ElasticsearchIntegrationTest
public void testIndexStateShardChanged() throws Throwable {
//start with a single node
String node1 = cluster().startNode();
String node1 = internalCluster().startNode();
IndexShardStateChangeListener stateChangeListenerNode1 = new IndexShardStateChangeListener();
//add a listener that keeps track of the shard state changes
cluster().getInstance(IndicesLifecycle.class, node1).addListener(stateChangeListenerNode1);
internalCluster().getInstance(IndicesLifecycle.class, node1).addListener(stateChangeListenerNode1);
//create an index
assertAcked(client().admin().indices().prepareCreate("test")
@ -68,10 +68,10 @@ public class IndicesLifecycleListenerTests extends ElasticsearchIntegrationTest
//disable allocation before starting a new node, as we need to register the listener first
assertAcked(client().admin().cluster().prepareUpdateSettings()
.setPersistentSettings(builder().put(CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)));
String node2 = cluster().startNode();
String node2 = internalCluster().startNode();
IndexShardStateChangeListener stateChangeListenerNode2 = new IndexShardStateChangeListener();
//add a listener that keeps track of the shard state changes
cluster().getInstance(IndicesLifecycle.class, node2).addListener(stateChangeListenerNode2);
internalCluster().getInstance(IndicesLifecycle.class, node2).addListener(stateChangeListenerNode2);
//re-enable allocation
assertAcked(client().admin().cluster().prepareUpdateSettings()
.setPersistentSettings(builder().put(CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, false)));

View File

@ -45,8 +45,8 @@ public class HunspellServiceTests extends ElasticsearchIntegrationTest {
.put("indices.analysis.hunspell.dictionary.ignore_case", true)
.build();
cluster().startNode(settings);
Dictionary dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US");
internalCluster().startNode(settings);
Dictionary dictionary = internalCluster().getInstance(HunspellService.class).getDictionary("en_US");
assertThat(dictionary, notNullValue());
assertIgnoreCase(true, dictionary);
}
@ -61,15 +61,15 @@ public class HunspellServiceTests extends ElasticsearchIntegrationTest {
.put("indices.analysis.hunspell.dictionary.en_US.ignore_case", false)
.build();
cluster().startNode(settings);
Dictionary dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US");
internalCluster().startNode(settings);
Dictionary dictionary = internalCluster().getInstance(HunspellService.class).getDictionary("en_US");
assertThat(dictionary, notNullValue());
assertIgnoreCase(false, dictionary);
// testing that dictionary specific settings override node level settings
dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US_custom");
dictionary = internalCluster().getInstance(HunspellService.class).getDictionary("en_US_custom");
assertThat(dictionary, notNullValue());
assertIgnoreCase(true, dictionary);
}
@ -80,8 +80,8 @@ public class HunspellServiceTests extends ElasticsearchIntegrationTest {
.put("indices.analysis.hunspell.dictionary.location", getResource("/indices/analyze/conf_dir/hunspell"))
.build();
cluster().startNode(settings);
Dictionary dictionary = cluster().getInstance(HunspellService.class).getDictionary("en_US");
internalCluster().startNode(settings);
Dictionary dictionary = internalCluster().getInstance(HunspellService.class).getDictionary("en_US");
assertThat(dictionary, notNullValue());
}

View File

@ -61,9 +61,9 @@ public class CacheTests extends ElasticsearchIntegrationTest {
SearchResponse searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), FilterBuilders.termFilter("field", "value").cacheKey("test_key"))).execute().actionGet();
assertThat(searchResponse.getHits().getHits().length, equalTo(1));
nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), cluster().hasFilterCache() ? greaterThan(0l) : is(0L));
assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), internalCluster().hasFilterCache() ? greaterThan(0l) : is(0L));
indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet();
assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), cluster().hasFilterCache() ? greaterThan(0l) : is(0L));
assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), internalCluster().hasFilterCache() ? greaterThan(0l) : is(0L));
client().admin().indices().prepareClearCache().setFilterKeys("test_key").execute().actionGet();
nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
@ -152,13 +152,13 @@ public class CacheTests extends ElasticsearchIntegrationTest {
nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
.execute().actionGet();
assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), cluster().hasFilterCache() ? greaterThan(0l) : is(0L));
assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), internalCluster().hasFilterCache() ? greaterThan(0l) : is(0L));
indicesStats = client().admin().indices().prepareStats("test")
.clear().setFieldData(true).setFilterCache(true)
.execute().actionGet();
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), cluster().hasFilterCache() ? greaterThan(0l) : is(0L));
assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), internalCluster().hasFilterCache() ? greaterThan(0l) : is(0L));
client().admin().indices().prepareClearCache().execute().actionGet();
Thread.sleep(100); // Make sure the filter cache entries have been removed...

View File

@ -54,7 +54,7 @@ public class IndicesLeaksTests extends ElasticsearchIntegrationTest {
client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
IndicesService indicesService = cluster().getInstance(IndicesService.class);
IndicesService indicesService = internalCluster().getInstance(IndicesService.class);
IndexService indexService = indicesService.indexServiceSafe("test");
Injector indexInjector = indexService.injector();
IndexShard shard = indexService.shardSafe(0);

View File

@ -52,7 +52,7 @@ public class ConcurrentDynamicTemplateTests extends ElasticsearchIntegrationTest
int iters = scaledRandomIntBetween(5, 15);
for (int i = 0; i < iters; i++) {
immutableCluster().wipeIndices("test");
cluster().wipeIndices("test");
assertAcked(prepareCreate("test")
.addMapping(mappingType, mapping));
ensureYellow();

View File

@ -39,7 +39,7 @@ public class DedicatedMasterGetFieldMappingTests extends SimpleGetFieldMappingsT
Settings settings = settingsBuilder()
.put("node.data", false)
.build();
cluster().startNodesAsync(settings, ImmutableSettings.EMPTY).get();
internalCluster().startNodesAsync(settings, ImmutableSettings.EMPTY).get();
}
}

View File

@ -63,12 +63,12 @@ public class IndexRecoveryTests extends ElasticsearchIntegrationTest {
@Test
public void gatewayRecoveryTest() throws Exception {
logger.info("--> start nodes");
String node = cluster().startNode(settingsBuilder().put("gateway.type", "local"));
String node = internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
logger.info("--> restarting cluster");
cluster().fullRestart();
internalCluster().fullRestart();
ensureGreen();
logger.info("--> request recoveries");
@ -94,12 +94,12 @@ public class IndexRecoveryTests extends ElasticsearchIntegrationTest {
@Test
public void gatewayRecoveryTestActiveOnly() throws Exception {
logger.info("--> start nodes");
cluster().startNode(settingsBuilder().put("gateway.type", "local"));
internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
logger.info("--> restarting cluster");
cluster().fullRestart();
internalCluster().fullRestart();
ensureGreen();
logger.info("--> request recoveries");
@ -112,13 +112,13 @@ public class IndexRecoveryTests extends ElasticsearchIntegrationTest {
@Test
public void replicaRecoveryTest() throws Exception {
logger.info("--> start node A");
String nodeA = cluster().startNode(settingsBuilder().put("gateway.type", "local"));
String nodeA = internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
logger.info("--> create index on node: {}", nodeA);
createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
logger.info("--> start node B");
String nodeB = cluster().startNode(settingsBuilder().put("gateway.type", "local"));
String nodeB = internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
ensureGreen();
// force a shard recovery from nodeA to nodeB
@ -161,13 +161,13 @@ public class IndexRecoveryTests extends ElasticsearchIntegrationTest {
@Test
public void rerouteRecoveryTest() throws Exception {
logger.info("--> start node A");
String nodeA = cluster().startNode(settingsBuilder().put("gateway.type", "local"));
String nodeA = internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
logger.info("--> create index on node: {}", nodeA);
createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
logger.info("--> start node B");
String nodeB = cluster().startNode(settingsBuilder().put("gateway.type", "local"));
String nodeB = internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
ensureGreen();
logger.info("--> move shard from: {} to: {}", nodeA, nodeB);
@ -197,7 +197,7 @@ public class IndexRecoveryTests extends ElasticsearchIntegrationTest {
@Test
public void snapshotRecoveryTest() throws Exception {
logger.info("--> start node A");
String nodeA = cluster().startNode(settingsBuilder().put("gateway.type", "local"));
String nodeA = internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
logger.info("--> create repository");
assertAcked(client().admin().cluster().preparePutRepository(REPO_NAME)

View File

@ -119,7 +119,7 @@ public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest {
@Test
public void testAutoExpandNumberOfReplicas0ToData() {
cluster().ensureAtMostNumDataNodes(2);
internalCluster().ensureAtMostNumDataNodes(2);
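// ensureAtMostNumDataNodes stops data nodes, if necessary, until no more than the given number remain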
logger.info("--> creating index test with auto expand replicas");
assertAcked(prepareCreate("test", 2, settingsBuilder().put("auto_expand_replicas", "0-all")));
@ -147,7 +147,7 @@ public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest {
assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 3));
logger.info("--> closing one node");
cluster().ensureAtMostNumDataNodes(2);
internalCluster().ensureAtMostNumDataNodes(2);
allowNodes("test", 2);
logger.info("--> running cluster health");
@ -160,7 +160,7 @@ public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest {
assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2));
logger.info("--> closing another node");
cluster().ensureAtMostNumDataNodes(1);
internalCluster().ensureAtMostNumDataNodes(1);
allowNodes("test", 1);
logger.info("--> running cluster health");
@ -176,7 +176,7 @@ public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest {
@Test
public void testAutoExpandNumberReplicas1ToData() {
logger.info("--> creating index test with auto expand replicas");
cluster().ensureAtMostNumDataNodes(2);
internalCluster().ensureAtMostNumDataNodes(2);
assertAcked(prepareCreate("test", 2, settingsBuilder().put("auto_expand_replicas", "1-all")));
NumShards numShards = getNumShards("test");
@ -203,7 +203,7 @@ public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest {
assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 3));
logger.info("--> closing one node");
cluster().ensureAtMostNumDataNodes(2);
internalCluster().ensureAtMostNumDataNodes(2);
allowNodes("test", 2);
logger.info("--> running cluster health");
@ -216,7 +216,7 @@ public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest {
assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2));
logger.info("--> closing another node");
cluster().ensureAtMostNumDataNodes(1);
internalCluster().ensureAtMostNumDataNodes(1);
allowNodes("test", 1);
logger.info("--> running cluster health");

View File

@ -26,7 +26,7 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.TestCluster;
import org.elasticsearch.test.InternalTestCluster;
import org.junit.Test;
import java.io.File;
@ -46,8 +46,8 @@ public class IndicesStoreTests extends ElasticsearchIntegrationTest {
@Test
public void shardsCleanup() throws Exception {
final String node_1 = cluster().startNode(SETTINGS);
final String node_2 = cluster().startNode(SETTINGS);
final String node_1 = internalCluster().startNode(SETTINGS);
final String node_2 = internalCluster().startNode(SETTINGS);
logger.info("--> creating index [test] with one shard and on replica");
client().admin().indices().create(createIndexRequest("test")
.settings(settingsBuilder().put("index.numberOfReplicas", 1).put("index.numberOfShards", 1))).actionGet();
@ -63,14 +63,14 @@ public class IndicesStoreTests extends ElasticsearchIntegrationTest {
assertThat(shardDirectory(node_2, "test", 0).exists(), equalTo(true));
logger.info("--> starting node server3");
String node_3 = cluster().startNode(SETTINGS);
String node_3 = internalCluster().startNode(SETTINGS);
logger.info("--> making sure that shard is not allocated on server3");
assertThat(waitForShardDeletion(node_3, "test", 0), equalTo(false));
File server2Shard = shardDirectory(node_2, "test", 0);
logger.info("--> stopping node node_2");
cluster().stopRandomNode(TestCluster.nameFilter(node_2));
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2));
assertThat(server2Shard.exists(), equalTo(true));
logger.info("--> running cluster_health");
@ -84,7 +84,7 @@ public class IndicesStoreTests extends ElasticsearchIntegrationTest {
assertThat(shardDirectory(node_3, "test", 0).exists(), equalTo(true));
logger.info("--> starting node node_4");
final String node_4 = cluster().startNode(SETTINGS);
final String node_4 = internalCluster().startNode(SETTINGS);
logger.info("--> running cluster_health");
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
@ -98,7 +98,7 @@ public class IndicesStoreTests extends ElasticsearchIntegrationTest {
}
private File shardDirectory(String server, String index, int shard) {
NodeEnvironment env = cluster().getInstance(NodeEnvironment.class, server);
NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server);
return env.shardLocations(new ShardId(index, shard))[0];
}

View File

@ -50,7 +50,7 @@ public class SimpleDistributorTests extends ElasticsearchIntegrationTest {
@Test
public void testDirectoryToString() throws IOException {
cluster().wipeTemplates(); // no random settings please
internalCluster().wipeTemplates(); // no random settings please
createIndexWithStoreType("test", "niofs", "least_used");
String storeString = getStoreDirectory("test", 0).toString();
logger.info(storeString);
@ -109,7 +109,7 @@ public class SimpleDistributorTests extends ElasticsearchIntegrationTest {
}
private void createIndexWithStoreType(String index, String storeType, String distributor) {
immutableCluster().wipeIndices(index);
cluster().wipeIndices(index);
client().admin().indices().prepareCreate(index)
.setSettings(settingsBuilder()
.put("index.store.distributor", distributor)
@ -122,7 +122,7 @@ public class SimpleDistributorTests extends ElasticsearchIntegrationTest {
}
private void createIndexWithoutRateLimitingStoreType(String index, String storeType, String distributor) {
immutableCluster().wipeIndices(index);
cluster().wipeIndices(index);
client().admin().indices().prepareCreate(index)
.setSettings(settingsBuilder()
.put("index.store.distributor", distributor)
@ -137,16 +137,16 @@ public class SimpleDistributorTests extends ElasticsearchIntegrationTest {
private File[] dataPaths() {
Set<String> nodes = cluster().nodesInclude("test");
Set<String> nodes = internalCluster().nodesInclude("test");
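// nodesInclude("test") returns the names of the nodes that currently hold shards of the "test" index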
assertThat(nodes.isEmpty(), equalTo(false));
NodeEnvironment env = cluster().getInstance(NodeEnvironment.class, nodes.iterator().next());
NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, nodes.iterator().next());
return env.nodeDataLocations();
}
private Directory getStoreDirectory(String index, int shardId) {
Set<String> nodes = cluster().nodesInclude("test");
Set<String> nodes = internalCluster().nodesInclude("test");
assertThat(nodes.isEmpty(), equalTo(false));
IndicesService indicesService = cluster().getInstance(IndicesService.class, nodes.iterator().next());
IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodes.iterator().next());
InternalIndexShard indexShard = (InternalIndexShard) (indicesService.indexService(index).shard(shardId));
return indexShard.store().directory();
}

View File

@ -28,7 +28,7 @@ import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.TestCluster.RestartCallback;
import org.elasticsearch.test.InternalTestCluster.RestartCallback;
import org.hamcrest.Matchers;
import org.junit.Test;
@ -48,7 +48,7 @@ public class LocalGatewayIndicesWarmerTests extends ElasticsearchIntegrationTest
public void testStatePersistence() throws Exception {
logger.info("--> starting 1 nodes");
cluster().startNode(settingsBuilder().put("gateway.type", "local"));
internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
logger.info("--> putting two templates");
createIndex("test");
@ -89,7 +89,7 @@ public class LocalGatewayIndicesWarmerTests extends ElasticsearchIntegrationTest
assertThat(templateWarmers.entries().size(), equalTo(1));
logger.info("--> restarting the node");
cluster().fullRestart(new RestartCallback() {
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
return settingsBuilder().put("gateway.type", "local").build();
@ -127,7 +127,7 @@ public class LocalGatewayIndicesWarmerTests extends ElasticsearchIntegrationTest
assertThat(warmersMetaData.entries().size(), equalTo(1));
logger.info("--> restarting the node");
cluster().fullRestart(new RestartCallback() {
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
return settingsBuilder().put("gateway.type", "local").build();

View File

@ -136,7 +136,7 @@ public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest {
assertThat(mltResponse.getHits().getAt(0).id(), equalTo("2"));
logger.info("Running moreLikeThis on alias with node client");
mltResponse = cluster().clientNodeClient().moreLikeThis(moreLikeThisRequest("beta").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
mltResponse = internalCluster().clientNodeClient().moreLikeThis(moreLikeThisRequest("beta").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet();
assertHitCount(mltResponse, 1l);
assertThat(mltResponse.getHits().getAt(0).id(), equalTo("3"));

View File

@ -60,15 +60,15 @@ public class SimpleNodesInfoTests extends ElasticsearchIntegrationTest {
@Test
public void testNodesInfos() throws Exception {
List<String> nodesIds = cluster().startNodesAsync(2).get();
List<String> nodesIds = internalCluster().startNodesAsync(2).get();
final String node_1 = nodesIds.get(0);
final String node_2 = nodesIds.get(1);
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get();
logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
String server1NodeId = cluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId();
String server2NodeId = cluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId();
String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId();
String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId();
logger.info("--> started nodes: " + server1NodeId + " and " + server2NodeId);
NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet();
@ -168,12 +168,12 @@ public class SimpleNodesInfoTests extends ElasticsearchIntegrationTest {
settings.putArray("plugin.types", pluginClassNames);
}
String nodeName = cluster().startNode(settings);
String nodeName = internalCluster().startNode(settings);
// We wait for a Green status
client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
return cluster().getInstance(ClusterService.class, nodeName).state().nodes().localNodeId();
return internalCluster().getInstance(ClusterService.class, nodeName).state().nodes().localNodeId();
}

View File

@ -296,7 +296,7 @@ public class PercolatorTests extends ElasticsearchIntegrationTest {
.setRefresh(true)
.execute().actionGet();
immutableCluster().wipeIndices("test");
cluster().wipeIndices("test");
createIndex("test");
ensureGreen();
@ -1582,7 +1582,7 @@ public class PercolatorTests extends ElasticsearchIntegrationTest {
awaitBusy(new Predicate<Object>() {
@Override
public boolean apply(Object o) {
for (Client client : cluster()) {
for (Client client : internalCluster()) {
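// the internal test cluster is iterable over one Client per running node, so the mapping is checked on every node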
GetMappingsResponse getMappingsResponse = client.admin().indices().prepareGetMappings("test1", "test2").get();
boolean hasPercolatorType = getMappingsResponse.getMappings().get("test1").containsKey(PercolatorService.TYPE_NAME);
if (!hasPercolatorType) {

View File

@ -71,7 +71,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
@Test
@Slow
public void testRestartNodePercolator1() throws Exception {
cluster().startNode();
internalCluster().startNode();
createIndex("test");
logger.info("--> register a query");
@ -91,7 +91,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
.get();
assertThat(percolate.getMatches(), arrayWithSize(1));
cluster().rollingRestart();
internalCluster().rollingRestart();
logger.info("Running Cluster Health (wait for the shards to startup)");
ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
@ -111,7 +111,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
@Test
@Slow
public void testRestartNodePercolator2() throws Exception {
cluster().startNode();
internalCluster().startNode();
createIndex("test");
logger.info("--> register a query");
@ -134,7 +134,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
assertMatchCount(percolate, 1l);
assertThat(percolate.getMatches(), arrayWithSize(1));
cluster().rollingRestart();
internalCluster().rollingRestart();
logger.info("Running Cluster Health (wait for the shards to startup)");
ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
@ -185,8 +185,8 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
@Slow
@TestLogging("index.percolator:TRACE,percolator:TRACE")
public void testLoadingPercolateQueriesDuringCloseAndOpen() throws Exception {
cluster().startNode();
cluster().startNode();
internalCluster().startNode();
internalCluster().startNode();
assertAcked(client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
@ -248,9 +248,9 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
// 3 nodes, 2 primaries + 2 replicas per primary, so each node should have a copy of the data.
// We only start and stop nodes 2 and 3, so all requests should succeed and never be partial.
private void percolatorRecovery(final boolean multiPercolate) throws Exception {
cluster().startNode(settingsBuilder().put("node.stay", true));
cluster().startNode(settingsBuilder().put("node.stay", false));
cluster().startNode(settingsBuilder().put("node.stay", false));
internalCluster().startNode(settingsBuilder().put("node.stay", true));
internalCluster().startNode(settingsBuilder().put("node.stay", false));
internalCluster().startNode(settingsBuilder().put("node.stay", false));
ensureGreen();
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder()
@ -260,7 +260,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
.get();
ensureGreen();
final Client client = cluster().client(new Predicate<Settings>() {
final Client client = internalCluster().client(new Predicate<Settings>() {
@Override
public boolean apply(Settings input) {
return input.getAsBoolean("node.stay", true);
@ -384,7 +384,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
try {
// 1 index, 2 primaries, 2 replicas per primary
for (int i = 0; i < 4; i++) {
cluster().stopRandomNode(nodePredicate);
internalCluster().stopRandomNode(nodePredicate);
client.admin().cluster().prepareHealth("test")
.setWaitForEvents(Priority.LANGUID)
.setTimeout(TimeValue.timeValueMinutes(2))
@ -392,7 +392,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
.setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas)
.get();
assertThat(error.get(), nullValue());
cluster().stopRandomNode(nodePredicate);
internalCluster().stopRandomNode(nodePredicate);
client.admin().cluster().prepareHealth("test")
.setWaitForEvents(Priority.LANGUID)
.setTimeout(TimeValue.timeValueMinutes(2))
@ -400,7 +400,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
.setWaitForActiveShards(2) // 1 node, so 2 shards (2 primaries, 0 replicas)
.get();
assertThat(error.get(), nullValue());
cluster().startNode();
internalCluster().startNode();
client.admin().cluster().prepareHealth("test")
.setWaitForEvents(Priority.LANGUID)
.setTimeout(TimeValue.timeValueMinutes(2))
@ -408,7 +408,7 @@ public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
.setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas)
.get();
assertThat(error.get(), nullValue());
cluster().startNode();
internalCluster().startNode();
client.admin().cluster().prepareHealth("test")
.setWaitForEvents(Priority.LANGUID)
.setTimeout(TimeValue.timeValueMinutes(2))

View File

@ -203,7 +203,7 @@ public class TTLPercolatorTests extends ElasticsearchIntegrationTest {
return indicesStatsResponse.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount() != 0;
}
}, 5, TimeUnit.SECONDS), equalTo(true));
cluster().wipeIndices("test");
internalCluster().wipeIndices("test");
client().admin().indices().prepareCreate("test")
.addMapping("type1", typeMapping)
.execute().actionGet();

View File

@ -78,7 +78,7 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
URI uri = URI.create(PluginManagerTests.class.getResource("plugin_single_folder.zip").toString());
downloadAndExtract(pluginName, "file://" + uri.getPath());
cluster().startNode(SETTINGS);
internalCluster().startNode(SETTINGS);
assertPluginLoaded(pluginName);
assertPluginAvailable(pluginName);
@ -92,7 +92,7 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
URI uri = URI.create(PluginManagerTests.class.getResource("plugin_folder_site.zip").toString());
downloadAndExtract(pluginName, "file://" + uri.getPath());
String nodeName = cluster().startNode(SETTINGS);
String nodeName = internalCluster().startNode(SETTINGS);
assertPluginLoaded(pluginName);
assertPluginAvailable(pluginName);
@ -105,7 +105,7 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
URI uri = URI.create(PluginManagerTests.class.getResource("plugin_without_folders.zip").toString());
downloadAndExtract(pluginName, "file://" + uri.getPath());
cluster().startNode(SETTINGS);
internalCluster().startNode(SETTINGS);
assertPluginLoaded(pluginName);
assertPluginAvailable(pluginName);
@ -118,7 +118,7 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
URI uri = URI.create(PluginManagerTests.class.getResource("plugin_folder_file.zip").toString());
downloadAndExtract(pluginName, "file://" + uri.getPath());
cluster().startNode(SETTINGS);
internalCluster().startNode(SETTINGS);
assertPluginLoaded(pluginName);
assertPluginAvailable(pluginName);
@ -167,7 +167,7 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
}
private void assertPluginAvailable(String pluginName) throws InterruptedException {
HttpServerTransport httpServerTransport = cluster().getInstance(HttpServerTransport.class);
HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
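// with no node name supplied, getInstance resolves the binding from one of the running nodes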
final HttpClient httpClient = new HttpClient(httpServerTransport.boundAddress().publishAddress());
logger.info("--> tested http address [{}]", httpServerTransport.info().getAddress());
@ -179,7 +179,7 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
if (response.errorCode() != RestStatus.OK.getStatus()) {
// We want to trace what's going on here before failing the test
logger.info("--> error caught [{}], headers [{}]", response.errorCode(), response.getHeaders());
logger.info("--> cluster state [{}]", cluster().clusterService().state());
logger.info("--> cluster state [{}]", internalCluster().clusterService().state());
return false;
}
return true;

View File

@ -65,7 +65,7 @@ public class ResponseHeaderPluginTests extends ElasticsearchIntegrationTest {
}
private HttpClient httpClient() {
HttpServerTransport httpServerTransport = cluster().getDataNodeInstance(HttpServerTransport.class);
HttpServerTransport httpServerTransport = internalCluster().getDataNodeInstance(HttpServerTransport.class);
return new HttpClient(httpServerTransport.boundAddress().publishAddress());
}
}

View File

@ -57,7 +57,7 @@ public class SitePluginTests extends ElasticsearchIntegrationTest {
}
public HttpClient httpClient() {
HttpServerTransport httpServerTransport = cluster().getDataNodeInstance(HttpServerTransport.class);
HttpServerTransport httpServerTransport = internalCluster().getDataNodeInstance(HttpServerTransport.class);
return new HttpClient(httpServerTransport.boundAddress().publishAddress());
}

View File

@ -56,7 +56,7 @@ public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
@Slow
@TestLogging("indices.cluster:TRACE,cluster.service:TRACE")
public void testFullRollingRestart() throws Exception {
cluster().startNode();
internalCluster().startNode();
createIndex("test");
for (int i = 0; i < 1000; i++) {
@ -70,13 +70,13 @@ public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
}
// now start adding nodes
cluster().startNodesAsync(2).get();
internalCluster().startNodesAsync(2).get();
// make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
// now start adding nodes
cluster().startNodesAsync(2).get();
internalCluster().startNodesAsync(2).get();
// We now have 5 nodes
setMinimumMasterNodes(3);
@ -90,14 +90,14 @@ public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
}
// now start shutting nodes down
cluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();
// make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("4"));
// going down to 3 nodes. note that the min_master_node setting may not be in effect when we shut down the 4th
// node, but that's OK since it was already set to 3 above.
setMinimumMasterNodes(2);
cluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();
// make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
@ -107,13 +107,13 @@ public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
}
// closing the 3rd node
cluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();
// make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("2"));
// closing the 2nd node
setMinimumMasterNodes(1);
cluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();
// make sure the cluster state is green, and all has been recovered
assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForYellowStatus().setWaitForRelocatingShards(0).setWaitForNodes("1"));

View File

@ -53,7 +53,7 @@ public class RelocationTests extends ElasticsearchIntegrationTest {
@Test
public void testSimpleRelocationNoIndexing() {
logger.info("--> starting [node1] ...");
final String node_1 = cluster().startNode();
final String node_1 = internalCluster().startNode();
logger.info("--> creating test index ...");
client().admin().indices().prepareCreate("test")
@ -79,7 +79,7 @@ public class RelocationTests extends ElasticsearchIntegrationTest {
assertThat(client().prepareCount("test").execute().actionGet().getCount(), equalTo(20l));
logger.info("--> start another node");
final String node_2 = cluster().startNode();
final String node_2 = internalCluster().startNode();
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
@ -109,7 +109,7 @@ public class RelocationTests extends ElasticsearchIntegrationTest {
String[] nodes = new String[numberOfNodes];
logger.info("--> starting [node1] ...");
nodes[0] = cluster().startNode();
nodes[0] = internalCluster().startNode();
logger.info("--> creating test index ...");
client().admin().indices().prepareCreate("test")
@ -121,7 +121,7 @@ public class RelocationTests extends ElasticsearchIntegrationTest {
for (int i = 1; i < numberOfNodes; i++) {
logger.info("--> starting [node{}] ...", i + 1);
nodes[i] = cluster().startNode();
nodes[i] = internalCluster().startNode();
if (i != numberOfNodes - 1) {
ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
.setWaitForNodes(Integer.toString(i + 1)).setWaitForGreenStatus().execute().actionGet();

View File

@ -153,7 +153,7 @@ public class RandomTests extends ElasticsearchIntegrationTest {
final int maxNumTerms = randomIntBetween(10, 5000);
final IntOpenHashSet valuesSet = new IntOpenHashSet();
immutableCluster().wipeIndices("idx");
cluster().wipeIndices("idx");
prepareCreate("idx")
.setSettings(ImmutableSettings.builder().put(InternalGlobalOrdinalsBuilder.ORDINAL_MAPPING_THRESHOLD_INDEX_SETTING_KEY, randomIntBetween(1, maxNumTerms)))
.addMapping("type", jsonBuilder().startObject()

View File

@ -114,7 +114,7 @@ public class DateHistogramTests extends ElasticsearchIntegrationTest {
@After
public void afterEachTest() throws IOException {
cluster().wipeIndices("idx2");
internalCluster().wipeIndices("idx2");
}
private static DateHistogram.Bucket getBucket(DateHistogram histogram, DateTime key) {

View File

@ -68,7 +68,7 @@ public class SearchWhileCreatingIndexTests extends ElasticsearchIntegrationTest
// TODO: add a smarter choice based on actual consistency (when that is randomized)
int shardsNo = numberOfReplicas + 1;
int neededNodes = shardsNo <= 2 ? 1 : shardsNo / 2 + 1;
cluster().ensureAtLeastNumDataNodes(randomIntBetween(neededNodes, shardsNo));
internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(neededNodes, shardsNo));
for (int i = 0; i < 20; i++) {
logger.info("running iteration {}", i);
if (createIndex) {
@ -99,9 +99,9 @@ public class SearchWhileCreatingIndexTests extends ElasticsearchIntegrationTest
}
assertHitCount(searchResponse, 1);
status = client().admin().cluster().prepareHealth("test").get().getStatus();
cluster().ensureAtLeastNumDataNodes(numberOfReplicas + 1);
internalCluster().ensureAtLeastNumDataNodes(numberOfReplicas + 1);
}
immutableCluster().wipeIndices("test");
cluster().wipeIndices("test");
}
}
}

View File

@ -38,13 +38,13 @@ public class SearchPreferenceTests extends ElasticsearchIntegrationTest {
@Test // see #2896
public void testStopOneNodePreferenceWithRedState() throws InterruptedException {
assertAcked(prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", immutableCluster().numDataNodes()+2).put("index.number_of_replicas", 0)));
assertAcked(prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", cluster().numDataNodes()+2).put("index.number_of_replicas", 0)));
ensureGreen();
for (int i = 0; i < 10; i++) {
client().prepareIndex("test", "type1", ""+i).setSource("field1", "value1").execute().actionGet();
}
refresh();
cluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();
client().admin().cluster().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).execute().actionGet();
String[] preferences = new String[] {"_primary", "_local", "_primary_first", "_prefer_node:somenode", "_prefer_node:server2"};
for (String pref : preferences) {
@ -76,7 +76,7 @@ public class SearchPreferenceTests extends ElasticsearchIntegrationTest {
client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet();
refresh();
final Client client = cluster().smartClient();
final Client client = internalCluster().smartClient();
SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet();
String firstNodeId = searchResponse.getHits().getAt(0).shard().nodeId();
searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet();

View File

@ -83,7 +83,7 @@ public class SimpleQueryTests extends ElasticsearchIntegrationTest {
SearchHit[] hits = searchResponse.getHits().hits();
assertThat(hits.length, equalTo(3));
assertThat(hits[0].score(), allOf(equalTo(hits[1].getScore()), equalTo(hits[2].getScore())));
immutableCluster().wipeIndices("test");
cluster().wipeIndices("test");
createIndex("test");
indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"),
@ -375,7 +375,7 @@ public class SimpleQueryTests extends ElasticsearchIntegrationTest {
} catch (SearchPhaseExecutionException e) {
assertTrue(e.getMessage().endsWith("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery (term=quick)]; }"));
}
immutableCluster().wipeIndices("test");
cluster().wipeIndices("test");
} catch (MapperParsingException ex) {
assertThat(version.toString(), version.onOrAfter(Version.V_1_0_0_RC2), equalTo(true));
assertThat(ex.getCause().getMessage(), equalTo("'omit_term_freq_and_positions' is not supported anymore - use ['index_options' : 'DOCS_ONLY'] instead"));

View File

@ -124,7 +124,7 @@ public class ScriptFilterSearchTests extends ElasticsearchIntegrationTest {
.execute().actionGet();
assertThat(response.getHits().totalHits(), equalTo(1l));
assertThat(scriptCounter.get(), equalTo(cluster().hasFilterCache() ? 3 : 1));
assertThat(scriptCounter.get(), equalTo(internalCluster().hasFilterCache() ? 3 : 1));
scriptCounter.set(0);
logger.info("running script filter the second time");

View File

@ -401,8 +401,8 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest {
assertThat(clearResponse.getNumFreed(), greaterThan(0));
assertThat(clearResponse.status(), equalTo(RestStatus.OK));
assertThrows(cluster().transportClient().prepareSearchScroll(searchResponse1.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND);
assertThrows(cluster().transportClient().prepareSearchScroll(searchResponse2.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND);
assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse1.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND);
assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse2.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND);
}
@Test
@ -446,6 +446,6 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest {
ClearScrollResponse clearScrollResponse = client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get();
assertThat(clearScrollResponse.isSucceeded(), is(true));
assertThrows(cluster().transportClient().prepareSearchScroll(searchResponse.getScrollId()), RestStatus.NOT_FOUND);
assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse.getScrollId()), RestStatus.NOT_FOUND);
}
}

View File

@ -55,8 +55,8 @@ public class SearchScrollWithFailingNodesTests extends ElasticsearchIntegrationT
@Test
@TestLogging("action.search:TRACE")
public void testScanScrollWithShardExceptions() throws Exception {
cluster().startNode();
cluster().startNode();
internalCluster().startNode();
internalCluster().startNode();
assertAcked(
prepareCreate("test")
// Enforces that only one shard can be allocated to a single node
@ -90,7 +90,7 @@ public class SearchScrollWithFailingNodesTests extends ElasticsearchIntegrationT
assertThat(numHits, equalTo(100l));
clearScroll("_all");
cluster().stopRandomNonMasterNode();
internalCluster().stopRandomNonMasterNode();
searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())

View File

@ -39,7 +39,7 @@ public class SlowDuelScrollTests extends DuelScrollTests {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
// If we add a constructor to InternalNode that allows us to define a version, then in the TestCluster
// If we add a constructor to InternalNode that allows us to define a version, then in the InternalTestCluster
// we can start nodes with different versions; then we wouldn't need this setting, which would also be helpful
// for other tests
Settings settings = super.nodeSettings(nodeOrdinal);

View File

@ -38,7 +38,7 @@ public class SlowSearchScrollTests extends SearchScrollTests {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
// If we add a constructor to InternalNode that allows us to define a version, then in the TestCluster
// If we add a constructor to InternalNode that allows us to define a version, then in the InternalTestCluster
// we can start nodes with different versions; then we wouldn't need this setting, which would also be helpful
// for other tests
Settings settings = super.nodeSettings(nodeOrdinal);

View File

@ -57,7 +57,7 @@ public class SearchStatsTests extends ElasticsearchIntegrationTest {
public void testSimpleStats() throws Exception {
// clear all stats first
client().admin().indices().prepareStats().clear().execute().actionGet();
final int numNodes = immutableCluster().numDataNodes();
final int numNodes = cluster().numDataNodes();
assertThat(numNodes, greaterThanOrEqualTo(2));
final int shardsIdx1 = randomIntBetween(1, 10); // we make sure each node gets at least a single shard...
final int shardsIdx2 = Math.max(numNodes - shardsIdx1, randomIntBetween(1, 10));
@ -89,7 +89,7 @@ public class SearchStatsTests extends ElasticsearchIntegrationTest {
refresh();
int iters = scaledRandomIntBetween(100, 150);
for (int i = 0; i < iters; i++) {
SearchResponse searchResponse = cluster().clientNodeClient().prepareSearch()
SearchResponse searchResponse = internalCluster().clientNodeClient().prepareSearch()
.setQuery(QueryBuilders.termQuery("field", "value")).setStats("group1", "group2")
.addHighlightedField("field")
.addScriptField("scrip1", "_source.field")

View File

@ -41,7 +41,7 @@ public abstract class AbstractSnapshotTests extends ElasticsearchIntegrationTest
public static long getFailureCount(String repository) {
long failureCount = 0;
for (RepositoriesService repositoriesService : cluster().getDataNodeInstances(RepositoriesService.class)) {
for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository);
failureCount += mockRepository.getFailureCount();
}
@ -64,7 +64,7 @@ public abstract class AbstractSnapshotTests extends ElasticsearchIntegrationTest
}
public static void stopNode(final String node) {
cluster().stopRandomNode(new Predicate<Settings>() {
internalCluster().stopRandomNode(new Predicate<Settings>() {
@Override
public boolean apply(Settings settings) {
return settings.get("name").equals(node);
@ -74,7 +74,7 @@ public abstract class AbstractSnapshotTests extends ElasticsearchIntegrationTest
public void waitForBlock(String node, String repository, TimeValue timeout) throws InterruptedException {
long start = System.currentTimeMillis();
RepositoriesService repositoriesService = cluster().getInstance(RepositoriesService.class, node);
RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, node);
MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository);
while (System.currentTimeMillis() - start < timeout.millis()) {
if (mockRepository.blocked()) {
@ -106,8 +106,8 @@ public abstract class AbstractSnapshotTests extends ElasticsearchIntegrationTest
}
public static String blockNodeWithIndex(String index) {
for(String node : cluster().nodesInclude("test-idx")) {
((MockRepository)cluster().getInstance(RepositoriesService.class, node).repository("test-repo")).blockOnDataFiles(true);
for(String node : internalCluster().nodesInclude("test-idx")) {
((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository("test-repo")).blockOnDataFiles(true);
return node;
}
fail("No nodes for the index " + index + " found");
@ -115,6 +115,6 @@ public abstract class AbstractSnapshotTests extends ElasticsearchIntegrationTest
}
public static void unblockNode(String node) {
((MockRepository)cluster().getInstance(RepositoriesService.class, node).repository("test-repo")).unblock();
((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository("test-repo")).unblock();
}
}

View File

@ -63,7 +63,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
@Test
public void restorePersistentSettingsTest() throws Exception {
logger.info("--> start node");
cluster().startNode(settingsBuilder().put("gateway.type", "local"));
internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
Client client = client();
// Add dummy persistent setting
@ -145,8 +145,8 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
public void snapshotWithStuckNodeTest() throws Exception {
logger.info("--> start 2 nodes");
ArrayList<String> nodes = newArrayList();
nodes.add(cluster().startNode());
nodes.add(cluster().startNode());
nodes.add(internalCluster().startNode());
nodes.add(internalCluster().startNode());
Client client = client();
assertAcked(prepareCreate("test-idx", 2, settingsBuilder().put("number_of_shards", 2).put("number_of_replicas", 0).put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false)));
@ -182,7 +182,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
logger.info("--> execution was blocked on node [{}], aborting snapshot", blockedNode);
ListenableActionFuture<DeleteSnapshotResponse> deleteSnapshotResponseFuture = cluster().client(nodes.get(0)).admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").execute();
ListenableActionFuture<DeleteSnapshotResponse> deleteSnapshotResponseFuture = internalCluster().client(nodes.get(0)).admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").execute();
// Make sure that abort makes some progress
Thread.sleep(100);
unblockNode(blockedNode);
@ -206,9 +206,9 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
@TestLogging("snapshots:TRACE")
public void restoreIndexWithMissingShards() throws Exception {
logger.info("--> start 2 nodes");
cluster().startNode(settingsBuilder().put("gateway.type", "local"));
cluster().startNode(settingsBuilder().put("gateway.type", "local"));
immutableCluster().wipeIndices("_all");
internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));
cluster().wipeIndices("_all");
assertAcked(prepareCreate("test-idx-1", 2, settingsBuilder().put("number_of_shards", 6)
.put("number_of_replicas", 0)
@ -223,7 +223,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
assertThat(client().prepareCount("test-idx-1").get().getCount(), equalTo(100L));
logger.info("--> shutdown one of the nodes");
cluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();
assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("<2").execute().actionGet().isTimedOut(), equalTo(false));
assertAcked(prepareCreate("test-idx-2", 1, settingsBuilder().put("number_of_shards", 6)
@ -308,7 +308,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
int initialNodes = between(1, 3);
logger.info("--> start {} nodes", initialNodes);
for (int i = 0; i < initialNodes; i++) {
cluster().startNode(settings);
internalCluster().startNode(settings);
}
logger.info("--> creating repository");
@ -327,7 +327,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
int asyncNodes = between(0, 5);
logger.info("--> start {} additional nodes asynchronously", asyncNodes);
ListenableFuture<List<String>> asyncNodesFuture = cluster().startNodesAsync(asyncNodes, settings);
ListenableFuture<List<String>> asyncNodesFuture = internalCluster().startNodesAsync(asyncNodes, settings);
int asyncIndices = between(0, 10);
logger.info("--> create {} additional indices asynchronously", asyncIndices);
@ -360,13 +360,13 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
if (indices.size() > 0) {
String index = indices.remove(randomInt(indices.size() - 1));
logger.info("--> deleting random index [{}]", index);
cluster().wipeIndices(index);
internalCluster().wipeIndices(index);
}
} else if (chaosType < 6) {
// Randomly shutdown a node
if (cluster().size() > 1) {
logger.info("--> shutting down random node");
cluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();
}
} else if (chaosType < 8) {
// Randomly create an index

View File

@ -133,7 +133,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
// Test restore after index deletion
logger.info("--> delete indices");
immutableCluster().wipeIndices("test-idx-1", "test-idx-2");
cluster().wipeIndices("test-idx-1", "test-idx-2");
logger.info("--> restore one index after deletion");
restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
@ -170,7 +170,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
logger.info("--> delete the index and recreate it with bar type");
immutableCluster().wipeIndices("test-idx");
cluster().wipeIndices("test-idx");
assertAcked(prepareCreate("test-idx", 2, ImmutableSettings.builder()
.put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 5)));
assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("bar").setSource("baz", "type=string"));
@ -269,7 +269,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-with-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
logger.info("--> delete test template");
immutableCluster().wipeTemplates("test-template");
cluster().wipeTemplates("test-template");
GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get();
assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
@ -306,8 +306,8 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state-with-index").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
logger.info("--> delete test template and index ");
immutableCluster().wipeIndices("test-idx");
immutableCluster().wipeTemplates("test-template");
cluster().wipeIndices("test-idx");
cluster().wipeTemplates("test-template");
getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get();
assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
@ -474,7 +474,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
// Test restore after index deletion
logger.info("--> delete index");
immutableCluster().wipeIndices("test-idx");
cluster().wipeIndices("test-idx");
logger.info("--> restore index after deletion");
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
@ -519,7 +519,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
// Test restore after index deletion
logger.info("--> delete index");
immutableCluster().wipeIndices("test-idx");
cluster().wipeIndices("test-idx");
logger.info("--> restore index after deletion");
ListenableActionFuture<RestoreSnapshotResponse> restoreSnapshotResponseFuture =
client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute();
@ -529,7 +529,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
assertThat(waitForIndex("test-idx", TimeValue.timeValueSeconds(10)), equalTo(true));
logger.info("--> delete index");
immutableCluster().wipeIndices("test-idx");
cluster().wipeIndices("test-idx");
logger.info("--> get restore results");
// Now read restore results and make sure it failed
RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotResponseFuture.actionGet(TimeValue.timeValueSeconds(10));
@ -613,7 +613,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
assertThat(numberOfFilesAfterDeletion, lessThan(numberOfFilesBeforeDeletion));
logger.info("--> delete index");
immutableCluster().wipeIndices("test-idx");
cluster().wipeIndices("test-idx");
logger.info("--> restore index");
String lastSnapshot = "test-snap-" + (numberOfSnapshots - 1);
@ -787,7 +787,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
logger.info("--> delete index");
immutableCluster().wipeIndices("test-idx");
cluster().wipeIndices("test-idx");
logger.info("--> replace mock repository with real one at the same location");
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
@ -872,7 +872,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
logger.info("--> delete index");
immutableCluster().wipeIndices("test-idx");
cluster().wipeIndices("test-idx");
logger.info("--> replace mock repository with real one at the same location");
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
@ -916,7 +916,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
logger.info("--> delete index");
immutableCluster().wipeIndices("test-idx");
cluster().wipeIndices("test-idx");
logger.info("--> create read-only URL repository");
assertAcked(client.admin().cluster().preparePutRepository("url-repo")
@ -976,7 +976,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
logger.info("--> delete index");
immutableCluster().wipeIndices("test-idx");
cluster().wipeIndices("test-idx");
logger.info("--> restore index");
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
@ -987,7 +987,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
long snapshotPause = 0L;
long restorePause = 0L;
for (RepositoriesService repositoriesService : cluster().getDataNodeInstances(RepositoriesService.class)) {
for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
snapshotPause += repositoriesService.repository("test-repo").snapshotThrottleTimeInNanos();
restorePause += repositoriesService.repository("test-repo").restoreThrottleTimeInNanos();
}
@ -1126,7 +1126,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
));
logger.info("--> start relocations");
allowNodes("test-idx", cluster().numDataNodes());
allowNodes("test-idx", internalCluster().numDataNodes());
logger.info("--> wait for relocations to start");

View File

@ -37,17 +37,17 @@ import java.util.Iterator;
import java.util.Random;
/**
* A test cluster implementation that holds a fixed set of external nodes as well as a TestCluster
* A test cluster implementation that holds a fixed set of external nodes as well as an InternalTestCluster
* which is used to run mixed version clusters in tests like backwards compatibility tests.
* Note: this is an experimental API
*/
public class CompositeTestCluster extends ImmutableTestCluster {
private final TestCluster cluster;
public class CompositeTestCluster extends TestCluster {
private final InternalTestCluster cluster;
private final ExternalNode[] externalNodes;
private final ExternalClient client = new ExternalClient();
private static final String NODE_PREFIX = "external_";
public CompositeTestCluster(TestCluster cluster, int numExternalNodes, ExternalNode externalNode) throws IOException {
public CompositeTestCluster(InternalTestCluster cluster, int numExternalNodes, ExternalNode externalNode) throws IOException {
this.cluster = cluster;
this.externalNodes = new ExternalNode[numExternalNodes];
for (int i = 0; i < externalNodes.length; i++) {
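For orientation, a minimal sketch of how such a composite cluster is wired up, mirroring the buildTestCluster override shown further down in ElasticsearchBackwardsCompatIntegrationTest; the external node count of 2 is illustrative, and backwardsCompatibilityPath() is assumed to point at an old elasticsearch installation:

    // sketch only: wraps an existing InternalTestCluster plus two external old-version nodes
    protected TestCluster buildTestCluster(Scope scope) throws IOException {
        TestCluster cluster = super.buildTestCluster(scope);    // an InternalTestCluster by default
        return new CompositeTestCluster(
                (InternalTestCluster) cluster,                   // nodes managed in-process
                2,                                               // number of external (old-version) nodes
                new ExternalNode(backwardsCompatibilityPath(), randomLong()));
    }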

View File

@ -85,12 +85,12 @@ public abstract class ElasticsearchBackwardsCompatIntegrationTest extends Elasti
}
public CompositeTestCluster backwardsCluster() {
return (CompositeTestCluster) immutableCluster();
return (CompositeTestCluster) cluster();
}
protected ImmutableTestCluster buildTestCluster(Scope scope) throws IOException {
ImmutableTestCluster cluster = super.buildTestCluster(scope);
return new CompositeTestCluster((TestCluster) cluster, between(minExternalNodes(), maxExternalNodes()), new ExternalNode(backwardsCompatibilityPath(), randomLong()));
protected TestCluster buildTestCluster(Scope scope) throws IOException {
TestCluster cluster = super.buildTestCluster(scope);
return new CompositeTestCluster((InternalTestCluster) cluster, between(minExternalNodes(), maxExternalNodes()), new ExternalNode(backwardsCompatibilityPath(), randomLong()));
}
protected int minExternalNodes() {

View File

@ -100,7 +100,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.TestCluster.clusterName;
import static org.elasticsearch.test.InternalTestCluster.clusterName;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.equalTo;
@ -153,7 +153,7 @@ import static org.hamcrest.Matchers.equalTo;
* This class supports the following system properties (passed with -Dkey=value to the application)
* <ul>
* <li>-D{@value #TESTS_CLIENT_RATIO} - a double value in the interval [0..1] which defines the ratio between node and transport clients used</li>
* <li>-D{@value TestCluster#TESTS_ENABLE_MOCK_MODULES} - a boolean value to enable or disable mock modules. This is
* <li>-D{@value InternalTestCluster#TESTS_ENABLE_MOCK_MODULES} - a boolean value to enable or disable mock modules. This is
* useful to test the system without asserting modules, to make sure they don't hide any bugs in production.</li>
* <li> - a random seed used to initialize the index random context.
* </ul>
@ -162,7 +162,7 @@ import static org.hamcrest.Matchers.equalTo;
@Ignore
@AbstractRandomizedTest.IntegrationTests
public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase {
private static ImmutableTestCluster GLOBAL_CLUSTER;
private static TestCluster GLOBAL_CLUSTER;
/**
* Key used to set the transport client ratio via the commandline -D{@value #TESTS_CLIENT_RATIO}
*/
@ -221,11 +221,11 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
* By default if no {@link ClusterScope} is configured this will hold a reference to the global cluster carried
* on across test suites.
*/
private static ImmutableTestCluster currentCluster;
private static TestCluster currentCluster;
private static final double TRANSPORT_CLIENT_RATIO = transportClientRatio();
private static final Map<Class<?>, ImmutableTestCluster> clusters = new IdentityHashMap<>();
private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<>();
private static ElasticsearchIntegrationTest INSTANCE = null; // see @SuiteScope
@ -262,10 +262,10 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
if (COMPATIBILITY_VERSION.before(Version.V_1_2_0)) {
numClientNodes = 0;
} else {
numClientNodes = TestCluster.DEFAULT_NUM_CLIENT_NODES;
numClientNodes = InternalTestCluster.DEFAULT_NUM_CLIENT_NODES;
}
GLOBAL_CLUSTER = new TestCluster(masterSeed, TestCluster.DEFAULT_MIN_NUM_DATA_NODES, TestCluster.DEFAULT_MAX_NUM_DATA_NODES,
clusterName("shared", ElasticsearchTestCase.CHILD_VM_ID, masterSeed), numClientNodes, TestCluster.DEFAULT_ENABLE_RANDOM_BENCH_NODES);
GLOBAL_CLUSTER = new InternalTestCluster(masterSeed, InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES, InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES,
clusterName("shared", ElasticsearchTestCase.CHILD_VM_ID, masterSeed), numClientNodes, InternalTestCluster.DEFAULT_ENABLE_RANDOM_BENCH_NODES);
}
}
}
@ -288,8 +288,8 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
default:
fail("Unknown Scope: [" + currentClusterScope + "]");
}
immutableCluster().beforeTest(getRandom(), getPerTestTransportClientRatio());
immutableCluster().wipe();
cluster().beforeTest(getRandom(), getPerTestTransportClientRatio());
cluster().wipe();
randomIndexTemplate();
logger.info("[{}#{}]: before test", getTestClass().getSimpleName(), getTestName());
} catch (OutOfMemoryError e) {
@ -306,7 +306,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
*/
private void randomIndexTemplate() throws IOException {
// TODO move settings for random directory etc here into the index based randomized settings.
if (immutableCluster().size() > 0) {
if (cluster().size() > 0) {
ImmutableSettings.Builder randomSettingsBuilder =
setRandomSettings(getRandom(), ImmutableSettings.builder())
.put(SETTING_INDEX_SEED, getRandom().nextLong());
@ -314,7 +314,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
if (randomizeNumberOfShardsAndReplicas()) {
randomSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, between(DEFAULT_MIN_NUM_SHARDS, DEFAULT_MAX_NUM_SHARDS))
// use either 0 or 1 replicas, and only rarely a higher number when enough data nodes are available
.put(SETTING_NUMBER_OF_REPLICAS, between(0, getRandom().nextInt(10) > 0 ? 1 : immutableCluster().numDataNodes() - 1));
.put(SETTING_NUMBER_OF_REPLICAS, between(0, getRandom().nextInt(10) > 0 ? 1 : cluster().numDataNodes() - 1));
}
XContentBuilder mappings = null;
if (frequently() && randomDynamicTemplates()) {
@ -476,8 +476,8 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
return builder;
}
public ImmutableTestCluster buildAndPutCluster(Scope currentClusterScope, boolean createIfExists) throws IOException {
ImmutableTestCluster testCluster = clusters.get(this.getClass());
public TestCluster buildAndPutCluster(Scope currentClusterScope, boolean createIfExists) throws IOException {
TestCluster testCluster = clusters.get(this.getClass());
if (createIfExists || testCluster == null) {
testCluster = buildTestCluster(currentClusterScope);
} else {
@ -509,8 +509,8 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
.transientSettings().getAsMap().size(), equalTo(0));
}
ensureClusterSizeConsistency();
immutableCluster().wipe(); // wipe after to make sure we fail in the test that didn't ack the delete
immutableCluster().assertAfterTest();
cluster().wipe(); // wipe after to make sure we fail in the test that didn't ack the delete
cluster().assertAfterTest();
} finally {
if (currentClusterScope == Scope.TEST) {
clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST
@ -540,23 +540,23 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
}
}
public static ImmutableTestCluster immutableCluster() {
public static TestCluster cluster() {
return currentCluster;
}
public static TestCluster cluster() {
if (!(currentCluster instanceof TestCluster)) {
public static InternalTestCluster internalCluster() {
if (!(currentCluster instanceof InternalTestCluster)) {
throw new UnsupportedOperationException("current test cluster is immutable");
}
return (TestCluster) currentCluster;
return (InternalTestCluster) currentCluster;
}
public ClusterService clusterService() {
return cluster().clusterService();
return internalCluster().clusterService();
}
public static Client client() {
Client client = immutableCluster().client();
Client client = cluster().client();
if (frequently()) {
client = new RandomizingClient((InternalClient) client, getRandom());
}
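To make the new split concrete: after this change cluster() hands back the general TestCluster (client access, wiping, size queries, which also work against an external cluster), while internalCluster() is the escape hatch to the mutable InternalTestCluster for node management. A small hedged sketch of a test using both; the class name, index name and node count are made up for illustration:

    import org.elasticsearch.test.ElasticsearchIntegrationTest;
    import org.junit.Test;
    import static org.hamcrest.Matchers.greaterThanOrEqualTo;
    import static org.junit.Assert.assertThat;

    public class RenamedAccessorsExampleTests extends ElasticsearchIntegrationTest {
        @Test
        public void testBothAccessors() throws Exception {
            internalCluster().ensureAtLeastNumDataNodes(2);   // node management needs the internal cluster
            createIndex("test");
            ensureGreen();
            assertThat(cluster().numDataNodes(), greaterThanOrEqualTo(2)); // read-only info via the base TestCluster
            cluster().wipeIndices("test");                    // wiping is available on any TestCluster
        }
    }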
@ -564,7 +564,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
}
public static Client dataNodeClient() {
Client client = cluster().dataNodeClient();
Client client = internalCluster().dataNodeClient();
if (frequently()) {
client = new RandomizingClient((InternalClient) client, getRandom());
}
@ -572,7 +572,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
}
public static Iterable<Client> clients() {
return immutableCluster();
return cluster();
}
protected int minimumNumberOfShards() {
@ -592,7 +592,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
}
protected int maximumNumberOfReplicas() {
return Math.max(0, immutableCluster().numDataNodes() - 1);
return Math.max(0, cluster().numDataNodes() - 1);
}
protected int numberOfReplicas() {
@ -635,7 +635,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
success = true;
} finally {
if (!success && !created.isEmpty()) {
immutableCluster().wipeIndices(created.toArray(new String[created.size()]));
cluster().wipeIndices(created.toArray(new String[created.size()]));
}
}
}
@ -671,7 +671,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
* </p>
*/
public CreateIndexRequestBuilder prepareCreate(String index, int numNodes, ImmutableSettings.Builder settingsBuilder) {
cluster().ensureAtLeastNumDataNodes(numNodes);
internalCluster().ensureAtLeastNumDataNodes(numNodes);
ImmutableSettings.Builder builder = ImmutableSettings.builder().put(indexSettings()).put(settingsBuilder.build());
@ -682,7 +682,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
}
private ImmutableSettings.Builder getExcludeSettings(String index, int num, ImmutableSettings.Builder builder) {
String exclude = Joiner.on(',').join(cluster().allDataNodesButN(num));
String exclude = Joiner.on(',').join(internalCluster().allDataNodesButN(num));
builder.put("index.routing.allocation.exclude._name", exclude);
return builder;
}
@ -694,7 +694,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
*/
public void allowNodes(String index, int n) {
assert index != null;
cluster().ensureAtLeastNumDataNodes(n);
internalCluster().ensureAtLeastNumDataNodes(n);
ImmutableSettings.Builder builder = ImmutableSettings.builder();
if (n > 0) {
getExcludeSettings(index, n, builder);
@ -850,8 +850,8 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
}
void ensureClusterSizeConsistency() {
logger.trace("Check consistency for [{}] nodes", immutableCluster().size());
assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(immutableCluster().size())).get());
logger.trace("Check consistency for [{}] nodes", cluster().size());
assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(cluster().size())).get());
}
/**
@ -1167,28 +1167,28 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
int numDataNodes() default -1;
/**
* Returns the minimum number of nodes in the cluster. Default is {@link org.elasticsearch.test.TestCluster#DEFAULT_MIN_NUM_DATA_NODES}.
* Returns the minimum number of nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_MIN_NUM_DATA_NODES}.
* Ignored when {@link ClusterScope#numDataNodes()} is set.
*/
int minNumDataNodes() default TestCluster.DEFAULT_MIN_NUM_DATA_NODES;
int minNumDataNodes() default InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES;
/**
* Returns the maximum number of nodes in the cluster. Default is {@link org.elasticsearch.test.TestCluster#DEFAULT_MAX_NUM_DATA_NODES}.
* Returns the maximum number of nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_MAX_NUM_DATA_NODES}.
* Ignored when {@link ClusterScope#numDataNodes()} is set.
*/
int maxNumDataNodes() default TestCluster.DEFAULT_MAX_NUM_DATA_NODES;
int maxNumDataNodes() default InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES;
/**
* Returns the number of client nodes in the cluster. Default is {@link org.elasticsearch.test.TestCluster#DEFAULT_NUM_CLIENT_NODES}, a
* Returns the number of client nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_NUM_CLIENT_NODES}, a
* negative value means that the number of client nodes will be randomized.
*/
int numClientNodes() default TestCluster.DEFAULT_NUM_CLIENT_NODES;
int numClientNodes() default InternalTestCluster.DEFAULT_NUM_CLIENT_NODES;
/**
* Returns whether the ability to randomly have benchmark (client) nodes as part of the cluster needs to be enabled.
* Default is {@link org.elasticsearch.test.TestCluster#DEFAULT_ENABLE_RANDOM_BENCH_NODES}.
* Default is {@link InternalTestCluster#DEFAULT_ENABLE_RANDOM_BENCH_NODES}.
*/
boolean enableRandomBenchNodes() default TestCluster.DEFAULT_ENABLE_RANDOM_BENCH_NODES;
boolean enableRandomBenchNodes() default InternalTestCluster.DEFAULT_ENABLE_RANDOM_BENCH_NODES;
/**
* Returns the transport client ratio. By default this returns <code>-1</code> which means a random
@ -1278,22 +1278,22 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
private int getMinNumDataNodes() {
ClusterScope annotation = getAnnotation(this.getClass());
return annotation == null ? TestCluster.DEFAULT_MIN_NUM_DATA_NODES : annotation.minNumDataNodes();
return annotation == null ? InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES : annotation.minNumDataNodes();
}
private int getMaxNumDataNodes() {
ClusterScope annotation = getAnnotation(this.getClass());
return annotation == null ? TestCluster.DEFAULT_MAX_NUM_DATA_NODES : annotation.maxNumDataNodes();
return annotation == null ? InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES : annotation.maxNumDataNodes();
}
private int getNumClientNodes() {
ClusterScope annotation = getAnnotation(this.getClass());
return annotation == null ? TestCluster.DEFAULT_NUM_CLIENT_NODES : annotation.numClientNodes();
return annotation == null ? InternalTestCluster.DEFAULT_NUM_CLIENT_NODES : annotation.numClientNodes();
}
private boolean enableRandomBenchNodes() {
ClusterScope annotation = getAnnotation(this.getClass());
return annotation == null ? TestCluster.DEFAULT_ENABLE_RANDOM_BENCH_NODES : annotation.enableRandomBenchNodes();
return annotation == null ? InternalTestCluster.DEFAULT_ENABLE_RANDOM_BENCH_NODES : annotation.enableRandomBenchNodes();
}
private boolean randomDynamicTemplates() {
@ -1312,7 +1312,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
return ImmutableSettings.EMPTY;
}
protected ImmutableTestCluster buildTestCluster(Scope scope) throws IOException {
protected TestCluster buildTestCluster(Scope scope) throws IOException {
long currentClusterSeed = randomLong();
NodeSettingsSource nodeSettingsSource = new NodeSettingsSource() {
@ -1333,7 +1333,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
int numClientNodes = getNumClientNodes();
boolean enableRandomBenchNodes = enableRandomBenchNodes();
return new TestCluster(currentClusterSeed, minNumDataNodes, maxNumDataNodes, clusterName(scope.name(), ElasticsearchTestCase.CHILD_VM_ID, currentClusterSeed), nodeSettingsSource, numClientNodes, enableRandomBenchNodes);
return new InternalTestCluster(currentClusterSeed, minNumDataNodes, maxNumDataNodes, clusterName(scope.name(), ElasticsearchTestCase.CHILD_VM_ID, currentClusterSeed), nodeSettingsSource, numClientNodes, enableRandomBenchNodes);
}
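The annotation defaults above are what buildTestCluster ultimately consumes through getMinNumDataNodes() and friends. A hedged sketch of overriding them on a suite, assuming ClusterScope is the nested annotation of ElasticsearchIntegrationTest; the class name and concrete values are illustrative, and the attribute names are taken from the hunk above:

    @ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2, maxNumDataNodes = 4,
            numClientNodes = 0, enableRandomBenchNodes = false)
    public class MyScopedTests extends ElasticsearchIntegrationTest {
        // with no annotation present, the getters above fall back to
        // InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES, DEFAULT_MAX_NUM_DATA_NODES,
        // DEFAULT_NUM_CLIENT_NODES and DEFAULT_ENABLE_RANDOM_BENCH_NODES
    }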
/**

View File

@ -38,7 +38,7 @@ import java.util.Iterator;
* It is a pure immutable test cluster that allows sending requests to a pre-existing cluster
* and by nature supports all the needed test operations like wipeIndices etc.
*/
public final class ExternalTestCluster extends ImmutableTestCluster {
public final class ExternalTestCluster extends TestCluster {
private final ESLogger logger = Loggers.getLogger(getClass());

View File

@ -1,208 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test;
import com.carrotsearch.hppc.ObjectArrayList;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.indices.IndexTemplateMissingException;
import org.elasticsearch.repositories.RepositoryMissingException;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Random;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertThat;
/**
* Base test cluster that provides the basis for running tests against any elasticsearch cluster, whose layout
* (e.g. number of nodes) is predefined and cannot be changed during test execution
*/
public abstract class ImmutableTestCluster implements Iterable<Client>, Closeable {
protected final ESLogger logger = Loggers.getLogger(getClass());
protected Random random;
protected double transportClientRatio = 0.0;
/**
* This method should be executed before each test to reset the cluster to its initial state.
*/
public void beforeTest(Random random, double transportClientRatio) throws IOException {
assert transportClientRatio >= 0.0 && transportClientRatio <= 1.0;
logger.debug("Reset test cluster with transport client ratio: [{}]", transportClientRatio);
this.transportClientRatio = transportClientRatio;
this.random = new Random(random.nextLong());
}
/**
* Wipes any data that a test can leave behind: indices, templates and repositories
*/
public void wipe() {
wipeIndices("_all");
wipeTemplates();
wipeRepositories();
}
/**
* This method checks all the things that need to be checked after each test
*/
public void assertAfterTest() {
assertAllSearchersClosed();
assertAllFilesClosed();
ensureEstimatedStats();
}
/**
* This method should be executed during tear down, after each test (but after assertAfterTest)
*/
public abstract void afterTest();
/**
* Returns a client connected to any node in the cluster
*/
public abstract Client client();
/**
* Returns the number of nodes in the cluster.
*/
public abstract int size();
/**
* Returns the number of data nodes in the cluster.
*/
public abstract int numDataNodes();
/**
* Returns the number of bench nodes in the cluster.
*/
public abstract int numBenchNodes();
/**
* Returns the http addresses of the nodes within the cluster.
* Can be used to run REST tests against the test cluster.
*/
public abstract InetSocketAddress[] httpAddresses();
/**
* Closes the current cluster
*/
public abstract void close() throws IOException;
/**
* Deletes the given indices from the test cluster. If no index name is passed to this method,
* all indices are removed.
*/
public void wipeIndices(String... indices) {
assert indices != null && indices.length > 0;
if (size() > 0) {
try {
assertAcked(client().admin().indices().prepareDelete(indices));
} catch (IndexMissingException e) {
// ignore
} catch (ElasticsearchIllegalArgumentException e) {
// Happens if `action.destructive_requires_name` is set to true
// which is the case in the CloseIndexDisableCloseAllTests
if ("_all".equals(indices[0])) {
ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
ObjectArrayList<String> concreteIndices = new ObjectArrayList<>();
for (IndexMetaData indexMetaData : clusterStateResponse.getState().metaData()) {
concreteIndices.add(indexMetaData.getIndex());
}
if (!concreteIndices.isEmpty()) {
assertAcked(client().admin().indices().prepareDelete(concreteIndices.toArray(String.class)));
}
}
}
}
}
/**
* Deletes index templates, supports wildcard notation.
* If no template name is passed to this method all templates are removed.
*/
public void wipeTemplates(String... templates) {
if (size() > 0) {
// if nothing is provided, delete all
if (templates.length == 0) {
templates = new String[]{"*"};
}
for (String template : templates) {
try {
client().admin().indices().prepareDeleteTemplate(template).execute().actionGet();
} catch (IndexTemplateMissingException e) {
// ignore
}
}
}
}
/**
* Deletes repositories, supports wildcard notation.
*/
public void wipeRepositories(String... repositories) {
if (size() > 0) {
// if nothing is provided, delete all
if (repositories.length == 0) {
repositories = new String[]{"*"};
}
for (String repository : repositories) {
try {
client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet();
} catch (RepositoryMissingException ex) {
// ignore
}
}
}
}
/**
* Ensures that the breaker statistics are reset to 0. Since we wiped all indices, all stats
* should be set to 0; otherwise something is wrong with the field data
* calculation.
*/
public void ensureEstimatedStats() {
if (size() > 0) {
NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats()
.clear().setBreaker(true).execute().actionGet();
for (NodeStats stats : nodeStats.getNodes()) {
assertThat("Breaker not reset to 0 on node: " + stats.getNode(),
stats.getBreaker().getEstimated(), equalTo(0L));
}
}
}
/**
* Return whether or not this cluster can cache filters.
*/
public abstract boolean hasFilterCache();
}
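Although this file is deleted, its wipe helpers live on under the renamed TestCluster, which is what the cluster().wipeIndices(...) calls throughout this diff go through. A small sketch of a per-test cleanup hook built on them; the index and repository names are placeholders:

    @After
    public void wipeLeftovers() {
        cluster().wipeIndices("test-idx");        // placeholder index name
        cluster().wipeTemplates();                // no argument means delete all templates
        cluster().wipeRepositories("test-repo");  // placeholder repository name
    }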

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -28,7 +28,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.recycler.Recycler.V;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.elasticsearch.test.TestCluster;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.threadpool.ThreadPool;
import java.lang.reflect.Array;
@ -69,7 +69,7 @@ public class MockPageCacheRecycler extends PageCacheRecycler {
@Inject
public MockPageCacheRecycler(Settings settings, ThreadPool threadPool) {
super(settings, threadPool);
final long seed = settings.getAsLong(TestCluster.SETTING_CLUSTER_NODE_SEED, 0L);
final long seed = settings.getAsLong(InternalTestCluster.SETTING_CLUSTER_NODE_SEED, 0L);
random = new Random(seed);
}

View File

@ -26,7 +26,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.elasticsearch.test.TestCluster;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.rest.ElasticsearchRestTests;
import org.junit.internal.AssumptionViolatedException;
import org.junit.runner.Description;
@ -135,7 +135,7 @@ public class ReproduceInfoPrinter extends RunListener {
}
public ReproduceErrorMessageBuilder appendESProperties() {
appendProperties("es.logger.level", "es.node.mode", "es.node.local", TESTS_CLUSTER, TestCluster.TESTS_ENABLE_MOCK_MODULES,
appendProperties("es.logger.level", "es.node.mode", "es.node.local", TESTS_CLUSTER, InternalTestCluster.TESTS_ENABLE_MOCK_MODULES,
"tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms", "tests.client.ratio", "tests.heap.size",
"tests.bwc", "tests.bwc.path", "tests.bwc.version");
if (System.getProperty("tests.jvm.argline") != null && !System.getProperty("tests.jvm.argline").isEmpty()) {

View File

@ -183,7 +183,7 @@ public class ElasticsearchRestTests extends ElasticsearchIntegrationTest {
assumeFalse("[" + testCandidate.getTestPath() + "] skipped, reason: blacklisted", blacklistedPathMatcher.matches(Paths.get(testPath)));
}
restTestExecutionContext.resetClient(immutableCluster().httpAddresses());
restTestExecutionContext.resetClient(cluster().httpAddresses());
restTestExecutionContext.clear();
//skip test if the whole suite (yaml file) is disabled

View File

@ -45,7 +45,7 @@ public final class Features {
*/
public static boolean areAllSupported(List<String> features) {
for (String feature : features) {
if ("benchmark".equals(feature) && ElasticsearchIntegrationTest.immutableCluster().numBenchNodes() > 0) {
if ("benchmark".equals(feature) && ElasticsearchIntegrationTest.cluster().numBenchNodes() > 0) {
continue;
}
if (!SUPPORTED.contains(feature)) {

View File

@ -36,7 +36,6 @@ import org.elasticsearch.index.store.fs.MmapFsDirectoryService;
import org.elasticsearch.index.store.fs.NioFsDirectoryService;
import org.elasticsearch.index.store.fs.SimpleFsDirectoryService;
import org.elasticsearch.index.store.ram.RamDirectoryService;
import org.elasticsearch.test.TestCluster;
import java.io.IOException;
import java.util.Random;

View File

@ -53,7 +53,7 @@ public class SimpleThreadPoolTests extends ElasticsearchIntegrationTest {
@Test(timeout = 20000)
public void testUpdatingThreadPoolSettings() throws Exception {
ThreadPool threadPool = cluster().getDataNodeInstance(ThreadPool.class);
ThreadPool threadPool = internalCluster().getDataNodeInstance(ThreadPool.class);
// Check that settings are changed
assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(5L));
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.keep_alive", "10m").build()).execute().actionGet();

View File

@ -33,7 +33,7 @@ import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.TestCluster;
import org.elasticsearch.test.InternalTestCluster;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@ -49,7 +49,7 @@ import static org.hamcrest.Matchers.equalTo;
*/
public class TribeTests extends ElasticsearchIntegrationTest {
private static TestCluster cluster2;
private static InternalTestCluster cluster2;
private Node tribeNode;
private Client tribeClient;
@ -58,7 +58,7 @@ public class TribeTests extends ElasticsearchIntegrationTest {
public static void setupSecondCluster() throws Exception {
ElasticsearchIntegrationTest.beforeClass();
// create another cluster
cluster2 = new TestCluster(randomLong(), 2, 2, Strings.randomBase64UUID(getRandom()), 0, false);
cluster2 = new InternalTestCluster(randomLong(), 2, 2, Strings.randomBase64UUID(getRandom()), 0, false);
cluster2.beforeTest(getRandom(), 0.1);
cluster2.ensureAtLeastNumDataNodes(2);
}
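Reading the constructor call above positionally (the same shape is used for GLOBAL_CLUSTER earlier in this diff), the arguments appear to be: random seed, minimum and maximum number of data nodes, cluster name, number of client nodes, and whether random bench nodes are enabled. A labelled restatement, with that reading as an assumption rather than a guarantee:

    InternalTestCluster secondCluster = new InternalTestCluster(
            randomLong(),                          // seed for the cluster's randomness
            2, 2,                                  // min and max data nodes
            Strings.randomBase64UUID(getRandom()), // cluster name
            0,                                     // no dedicated client nodes
            false);                                // no random bench nodes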
@ -91,7 +91,7 @@ public class TribeTests extends ElasticsearchIntegrationTest {
private void setupTribeNode(Settings settings) {
Settings merged = ImmutableSettings.builder()
.put("tribe.t1.cluster.name", cluster().getClusterName())
.put("tribe.t1.cluster.name", internalCluster().getClusterName())
.put("tribe.t2.cluster.name", cluster2.getClusterName())
.put("tribe.blocks.write", false)
.put("tribe.blocks.read", false)
@ -107,7 +107,7 @@ public class TribeTests extends ElasticsearchIntegrationTest {
@Test
public void testGlobalReadWriteBlocks() throws Exception {
logger.info("create 2 indices, test1 on t1, and test2 on t2");
cluster().client().admin().indices().prepareCreate("test1").get();
internalCluster().client().admin().indices().prepareCreate("test1").get();
cluster2.client().admin().indices().prepareCreate("test2").get();
@ -145,8 +145,8 @@ public class TribeTests extends ElasticsearchIntegrationTest {
@Test
public void testIndexWriteBlocks() throws Exception {
logger.info("create 2 indices, test1 on t1, and test2 on t2");
cluster().client().admin().indices().prepareCreate("test1").get();
cluster().client().admin().indices().prepareCreate("block_test1").get();
internalCluster().client().admin().indices().prepareCreate("test1").get();
internalCluster().client().admin().indices().prepareCreate("block_test1").get();
cluster2.client().admin().indices().prepareCreate("test2").get();
cluster2.client().admin().indices().prepareCreate("block_test2").get();
@ -209,9 +209,9 @@ public class TribeTests extends ElasticsearchIntegrationTest {
logger.info("testing preference for tribe {}", tribe);
logger.info("create 2 indices, test1 on t1, and test2 on t2");
cluster().client().admin().indices().prepareCreate("conflict").get();
internalCluster().client().admin().indices().prepareCreate("conflict").get();
cluster2.client().admin().indices().prepareCreate("conflict").get();
cluster().client().admin().indices().prepareCreate("test1").get();
internalCluster().client().admin().indices().prepareCreate("test1").get();
cluster2.client().admin().indices().prepareCreate("test2").get();
setupTribeNode(ImmutableSettings.builder()
@ -232,7 +232,7 @@ public class TribeTests extends ElasticsearchIntegrationTest {
public void testTribeOnOneCluster() throws Exception {
setupTribeNode(ImmutableSettings.EMPTY);
logger.info("create 2 indices, test1 on t1, and test2 on t2");
cluster().client().admin().indices().prepareCreate("test1").get();
internalCluster().client().admin().indices().prepareCreate("test1").get();
cluster2.client().admin().indices().prepareCreate("test2").get();
@ -328,7 +328,7 @@ public class TribeTests extends ElasticsearchIntegrationTest {
@Override
public boolean apply(Object o) {
DiscoveryNodes tribeNodes = tribeNode.client().admin().cluster().prepareState().get().getState().getNodes();
return countDataNodesForTribe("t1", tribeNodes) == cluster().client().admin().cluster().prepareState().get().getState().getNodes().dataNodes().size()
return countDataNodesForTribe("t1", tribeNodes) == internalCluster().client().admin().cluster().prepareState().get().getState().getNodes().dataNodes().size()
&& countDataNodesForTribe("t2", tribeNodes) == cluster2.client().admin().cluster().prepareState().get().getState().getNodes().dataNodes().size();
}
});

View File

@ -215,7 +215,7 @@ public class SimpleValidateQueryTests extends ElasticsearchIntegrationTest {
refresh();
for (Client client : cluster()) {
for (Client client : internalCluster()) {
ValidateQueryResponse response = client.admin().indices().prepareValidateQuery("test")
.setSource("foo".getBytes(Charsets.UTF_8))
.setExplain(true)
@ -227,7 +227,7 @@ public class SimpleValidateQueryTests extends ElasticsearchIntegrationTest {
}
for (Client client : cluster()) {
for (Client client : internalCluster()) {
ValidateQueryResponse response = client.admin().indices().prepareValidateQuery("test")
.setQuery(QueryBuilders.queryString("foo"))
.setExplain(true)

View File

@ -73,7 +73,7 @@ public class ConcurrentDocumentOperationTests extends ElasticsearchIntegrationTe
logger.info("done indexing, check all have the same field value");
Map masterSource = client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap();
for (int i = 0; i < (immutableCluster().size() * 5); i++) {
for (int i = 0; i < (cluster().size() * 5); i++) {
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource));
}
}