Add a dedicated integ test project for multi-cluster-search

This commit adds `qa/multi-cluster-search`, which currently runs a
simple search across 2 clusters. This commit also adds support for IPv6
addresses and fixes an issue where all shards of the local cluster were searched
when only a remote index was given.
This commit is contained in:
Simon Willnauer 2016-11-23 18:12:42 +01:00 committed by Luca Cavanna
parent b440ea946f
commit ec86771f6e
10 changed files with 316 additions and 44 deletions

View File

@ -96,6 +96,15 @@ class ClusterConfiguration {
return seedNode.transportUri()
}
/**
* A closure to call which returns a map of settings.
*
* This can be used to pass settings to a cluster that are not available at evaluation time ie.
* the address of a remote cluster etc.
*/
@Input
Closure dynamicSettings = { -> Collections.emptyMap() }
/**
* A closure to call before the cluster is considered ready. The closure is passed the node info,
* as well as a groovy AntBuilder, to enable running ant condition checks. The default wait

View File

@ -294,6 +294,8 @@ class ClusterFormationTasks {
if (unicastTransportUri != null) {
esConfig['discovery.zen.ping.unicast.hosts'] = "\"${unicastTransportUri}\""
}
Map dynamicSettings = node.config.dynamicSettings();
esConfig.putAll(dynamicSettings)
File configFile = new File(node.confDir, 'elasticsearch.yml')
logger.info("Configuring ${configFile}")
configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8')

View File

@ -115,31 +115,35 @@ public class SearchTransportService extends AbstractComponent {
throw new IllegalArgumentException("no hosts set for remote cluster [" + clusterName + "], at least one host is required");
}
for (String remoteHost : remoteHosts) {
String[] strings = remoteHost.split(":");
if (strings.length != 2) {
int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300
if (portSeparator == -1 || portSeparator == remoteHost.length()) {
throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] " +
"instead for remote cluster [" + clusterName + "]");
"instead for remote cluster [" + clusterName + "]");
}
String port = remoteHost.substring(portSeparator+1);
try {
Integer.valueOf(strings[1]);
Integer portValue = Integer.valueOf(port);
if (portValue <= 0) {
throw new IllegalArgumentException("port number must be > 0 but was: [" + portValue + "]");
}
} catch(NumberFormatException e) {
throw new IllegalArgumentException("port must be a number, found [" + strings[1] + "] instead for remote cluster [" +
throw new IllegalArgumentException("port must be a number, found [" + port + "] instead for remote cluster [" +
clusterName + "]");
}
}
}
}
private void setRemoteClustersSeeds(Settings settings) {
static Map<String, List<DiscoveryNode>> builtRemoteClustersSeeds(Settings settings) {
Map<String, List<DiscoveryNode>> remoteClustersNodes = new HashMap<>();
for (String clusterName : settings.names()) {
String[] remoteHosts = settings.getAsArray(clusterName);
for (String remoteHost : remoteHosts) {
String[] strings = remoteHost.split(":");
String host = strings[0];
int port = Integer.valueOf(strings[1]);
int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300
String host = remoteHost.substring(0, portSeparator);
int port = Integer.valueOf(remoteHost.substring(portSeparator+1));
DiscoveryNode node = new DiscoveryNode(clusterName + "#" + remoteHost,
new TransportAddress(new InetSocketAddress(host, port)), Version.CURRENT.minimumCompatibilityVersion());
new TransportAddress(new InetSocketAddress(host, port)), Version.CURRENT.minimumCompatibilityVersion());
//don't connect yet as that would require the remote node to be up and would fail the local node startup otherwise
List<DiscoveryNode> nodes = remoteClustersNodes.get(clusterName);
if (nodes == null) {
@ -149,7 +153,11 @@ public class SearchTransportService extends AbstractComponent {
nodes.add(node);
}
}
remoteClustersSeeds = remoteClustersNodes;
return remoteClustersNodes;
}
private void setRemoteClustersSeeds(Settings settings) {
remoteClustersSeeds = builtRemoteClustersSeeds(settings);
}
private DiscoveryNode connectToRemoteCluster(String clusterName) {

View File

@ -137,38 +137,35 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
Strings.EMPTY_ARRAY, Collections.emptyList(), Collections.emptySet(), listener);
} else {
searchTransportService.sendSearchShards(searchRequest, remoteIndicesByCluster,
new ActionListener<Map<String, ClusterSearchShardsResponse>>() {
@Override
public void onResponse(Map<String, ClusterSearchShardsResponse> searchShardsResponses) {
List<ShardIterator> remoteShardIterators = new ArrayList<>();
Set<DiscoveryNode> remoteNodes = new HashSet<>();
Set<String> remoteUUIDs = new HashSet<>();
for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResponses.entrySet()) {
String clusterName = entry.getKey();
ClusterSearchShardsResponse searchShardsResponse = entry.getValue();
Collections.addAll(remoteNodes, searchShardsResponse.getNodes());
for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) {
//add the cluster name to the remote index names for indices disambiguation
//this ends up in the hits returned with the search response
Index index = new Index(clusterName + REMOTE_CLUSTER_INDEX_SEPARATOR +
clusterSearchShardsGroup.getShardId().getIndex().getName(),
clusterSearchShardsGroup.getShardId().getIndex().getUUID());
ShardId shardId = new ShardId(index, clusterSearchShardsGroup.getShardId().getId());
ShardIterator shardIterator = new PlainShardIterator(shardId,
Arrays.asList(clusterSearchShardsGroup.getShards()));
remoteShardIterators.add(shardIterator);
remoteUUIDs.add(clusterSearchShardsGroup.getShardId().getIndex().getUUID());
}
}
executeSearch((SearchTask)task, startTimeInMillis, searchRequest, localIndices,
remoteUUIDs.toArray(new String[remoteUUIDs.size()]), remoteShardIterators, remoteNodes, listener);
}
ActionListener.wrap((searchShardsResponses) -> {
List<ShardIterator> remoteShardIterators = new ArrayList<>();
Set<DiscoveryNode> remoteNodes = new HashSet<>();
Set<String> remoteUUIDs = new HashSet<>();
processRemoteShards(searchShardsResponses, remoteShardIterators, remoteNodes, remoteUUIDs);
executeSearch((SearchTask)task, startTimeInMillis, searchRequest, localIndices,
remoteUUIDs.toArray(new String[remoteUUIDs.size()]), remoteShardIterators, remoteNodes, listener);
}, listener::onFailure));
}
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
private void processRemoteShards(Map<String, ClusterSearchShardsResponse> searchShardsResponses,
List<ShardIterator> remoteShardIterators, Set<DiscoveryNode> remoteNodes, Set<String> remoteUUIDs) {
for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResponses.entrySet()) {
String clusterName = entry.getKey();
ClusterSearchShardsResponse searchShardsResponse = entry.getValue();
Collections.addAll(remoteNodes, searchShardsResponse.getNodes());
for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) {
//add the cluster name to the remote index names for indices disambiguation
//this ends up in the hits returned with the search response
Index index = new Index(clusterName + REMOTE_CLUSTER_INDEX_SEPARATOR +
clusterSearchShardsGroup.getShardId().getIndex().getName(),
clusterSearchShardsGroup.getShardId().getIndex().getUUID());
ShardId shardId = new ShardId(index, clusterSearchShardsGroup.getShardId().getId());
ShardIterator shardIterator = new PlainShardIterator(shardId,
Arrays.asList(clusterSearchShardsGroup.getShards()));
remoteShardIterators.add(shardIterator);
remoteUUIDs.add(clusterSearchShardsGroup.getShardId().getIndex().getUUID());
}
}
}
@ -181,8 +178,13 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
// TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
// date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
// of just for the _search api
Index[] indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(),
startTimeInMillis, localIndices);
final Index[] indices;
if (localIndices.length == 0 && remoteUUIDs.length > 0) {
indices = new Index[0]; // don't search on ALL if nothing is specified
} else {
indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(),
startTimeInMillis, localIndices);
}
Map<String, AliasFilter> aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteUUIDs);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(),
searchRequest.indices());

View File

@ -0,0 +1,67 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.search;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.test.ESTestCase;
import java.net.InetSocketAddress;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
/**
 * Unit tests for the remote cluster seed parsing in {@code SearchTransportService}:
 * validation of the {@code action.search.remote.*} setting and construction of the
 * per-cluster seed-node map.
 */
public class SearchTransportServiceTests extends ESTestCase {

    public void testRemoteClusterSeedSetting() {
        // well-formed host:port pairs, including a bracketed IPv6 literal, must validate
        Settings valid = Settings.builder()
            .put("action.search.remote.foo", "192.168.0.1:8080")
            .put("action.search.remote.bar", "[::1]:9090")
            .build();
        SearchTransportService.REMOTE_CLUSTERS_SEEDS.get(valid);

        // a host with no port is rejected by the setting's validator
        Settings missingPort = Settings.builder()
            .put("action.search.remote.foo", "192.168.0.1")
            .build();
        expectThrows(IllegalArgumentException.class,
            () -> SearchTransportService.REMOTE_CLUSTERS_SEEDS.get(missingPort));
    }

    public void testBuiltRemoteClustersSeeds() {
        Settings settings = Settings.builder()
            .put("action.search.remote.foo", "192.168.0.1:8080")
            .put("action.search.remote.bar", "[::1]:9090")
            .build();
        Map<String, List<DiscoveryNode>> seeds = SearchTransportService.builtRemoteClustersSeeds(
            SearchTransportService.REMOTE_CLUSTERS_SEEDS.get(settings));

        // one seed node per configured cluster
        assertEquals(2, seeds.size());
        for (String cluster : new String[] {"foo", "bar"}) {
            assertTrue(seeds.containsKey(cluster));
            assertEquals(1, seeds.get(cluster).size());
        }

        DiscoveryNode foo = seeds.get("foo").get(0);
        assertEquals(new TransportAddress(new InetSocketAddress("192.168.0.1", 8080)), foo.getAddress());
        assertEquals("foo#192.168.0.1:8080", foo.getId());
        assertEquals(Version.CURRENT.minimumCompatibilityVersion(), foo.getVersion());

        // IPv6: the bracketed literal is preserved in both the address and the node id
        DiscoveryNode bar = seeds.get("bar").get(0);
        assertEquals(new TransportAddress(new InetSocketAddress("[::1]", 9090)), bar.getAddress());
        assertEquals("bar#[::1]:9090", bar.getId());
        assertEquals(Version.CURRENT.minimumCompatibilityVersion(), bar.getVersion());
    }
}

View File

@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.elasticsearch.gradle.test.RestIntegTestTask

// No production sources here; this project only wires up REST integration tests.
apply plugin: 'elasticsearch.standalone-test'

// Phase 1: start a standalone one-node "remote" cluster and run the
// remote_cluster YAML suite against it to create and index the remote test data.
task remoteClusterTest(type: RestIntegTestTask) {
    mustRunAfter(precommit)
    cluster {
        distribution = 'zip'
        numNodes = 1
        clusterName = 'remote-cluster'
    }
    systemProperty 'tests.rest.suite', 'remote_cluster'
}

// Phase 2: start the local cluster with the remote cluster registered as
// 'my_remote_cluster' and run the multi_cluster YAML suite across both.
// dynamicSettings is a closure because the remote transport address is only
// known after remoteClusterTest has started its node.
task mixedClusterTest(type: RestIntegTestTask) {
    dependsOn(remoteClusterTest)
    cluster {
        distribution = 'zip'
        dynamicSettings = { ->
            Collections.singletonMap("action.search.remote.my_remote_cluster",
                "\"${remoteClusterTest.nodes.get(0).transportUri()}\"")
        }
    }
    systemProperty 'tests.rest.suite', 'multi_cluster'
    // tear the remote cluster down once the mixed-cluster test is finished
    finalizedBy 'remoteClusterTest#stop'
}

// integTest is the conventional entry point; it just chains both phases.
task integTest {
    dependsOn = [mixedClusterTest]
}

test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test
check.dependsOn(integTest)

View File

@ -0,0 +1,48 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.upgrades;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import org.apache.lucene.util.TimeUnits;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestParseException;
import java.io.IOException;
/**
 * Entry point that runs the YAML REST test suites of the multi-cluster-search
 * QA project. Which suite executes ({@code remote_cluster} or {@code multi_cluster})
 * is chosen via the {@code tests.rest.suite} system property set by the Gradle build.
 */
// NOTE(review): the package is org.elasticsearch.upgrades although this class belongs
// to the multi-cluster-search project — looks copied from the rolling-upgrade qa
// project; confirm the intended package.
@TimeoutSuite(millis = 5 * TimeUnits.MINUTE) // to account for slow as hell VMs
public class MultiClusterSearchYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    // Keep indices after the suite completes — presumably so data indexed by the
    // remote_cluster suite is still present when the multi_cluster suite searches
    // it remotely; TODO confirm.
    @Override
    protected boolean preserveIndicesUponCompletion() {
        return true;
    }

    public MultiClusterSearchYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) {
        super(testCandidate);
    }

    /** Supplies one test candidate per YAML test discovered on the classpath. */
    @ParametersFactory
    public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
        return createParameters();
    }
}

View File

@ -0,0 +1,47 @@
---
# Runs on the local ("mixed") cluster after the remote cluster has been seeded
# by the remote_cluster suite. Indexes 5 local documents, then verifies search
# totals for combined, remote-only, and local-only index patterns.
"Index data and search on the mixed cluster":
  - do:
      indices.create:
        index: test_index_1
        body:
          settings:
            index:
              # single-node test cluster — replicas could never be allocated
              number_of_replicas: 0
  - do:
      bulk:
        refresh: true
        body:
          - '{"index": {"_index": "test_index_1", "_type": "test_type"}}'
          - '{"f1": "v1_old", "f2": 0}'
          - '{"index": {"_index": "test_index_1", "_type": "test_type"}}'
          - '{"f1": "v2_old", "f2": 1}'
          - '{"index": {"_index": "test_index_1", "_type": "test_type"}}'
          - '{"f1": "v3_old", "f2": 2}'
          - '{"index": {"_index": "test_index_1", "_type": "test_type"}}'
          - '{"f1": "v4_old", "f2": 3}'
          - '{"index": {"_index": "test_index_1", "_type": "test_type"}}'
          - '{"f1": "v5_old", "f2": 4}'
  - do:
      indices.flush:
        index: test_index_1
  # local + remote ("cluster|index" syntax): 5 docs from each cluster
  - do:
      search:
        index: test_index_1,my_remote_cluster|test_index
  - match: { hits.total: 10 }
  # remote only: must NOT fan out to local shards when no local index is given
  - do:
      search:
        index: my_remote_cluster|test_index
  - match: { hits.total: 5}
  # local only
  - do:
      search:
        index: test_index_1
  - match: { hits.total: 5}

View File

@ -0,0 +1,35 @@
---
# Runs on the standalone remote cluster before the mixed-cluster suite; the
# documents indexed here are searched remotely by the multi_cluster suite.
# Fix: the test description said "old cluster", a leftover from the
# rolling-upgrade qa project this file was copied from — this suite runs
# against the remote cluster, not an old-version cluster.
"Index data and search on the remote cluster":
  - do:
      indices.create:
        index: test_index
        body:
          settings:
            index:
              # single-node test cluster — replicas could never be allocated
              number_of_replicas: 0
  - do:
      bulk:
        refresh: true
        body:
          - '{"index": {"_index": "test_index", "_type": "test_type"}}'
          - '{"f1": "v1_old", "f2": 0}'
          - '{"index": {"_index": "test_index", "_type": "test_type"}}'
          - '{"f1": "v2_old", "f2": 1}'
          - '{"index": {"_index": "test_index", "_type": "test_type"}}'
          - '{"f1": "v3_old", "f2": 2}'
          - '{"index": {"_index": "test_index", "_type": "test_type"}}'
          - '{"f1": "v4_old", "f2": 3}'
          - '{"index": {"_index": "test_index", "_type": "test_type"}}'
          - '{"f1": "v5_old", "f2": 4}'
  - do:
      indices.flush:
        index: test_index
  # sanity check: all 5 docs are searchable locally on the remote cluster
  - do:
      search:
        index: test_index
  - match: { hits.total: 5 }

View File

@ -54,6 +54,7 @@ List projects = [
'qa:evil-tests',
'qa:no-bootstrap-tests',
'qa:rolling-upgrade',
'qa:multi-cluster-search',
'qa:smoke-test-client',
'qa:smoke-test-http',
'qa:smoke-test-ingest-with-all-dependencies',