Add Docker-based tests for proxy connection mode (#51658)
Adds more tests for the new "proxy" remote cluster connection mode, using a Docker-based setup, as well as testing SNI-based routing using HAProxy.
- Checks that the new proxy mode works when the publish hosts of the nodes in the remote cluster are not routable.
- Checks that the new proxy mode works with clusters whose nodes are hidden behind HAProxy.
- Checks that the new proxy mode works with clusters whose nodes are hidden behind HAProxy, using SNI to identify the nodes/cluster behind HAProxy.

Relates #49067
This commit is contained in:
parent 493f77d076
commit 39b4710081
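For orientation before the diff: a proxy-mode remote cluster is driven entirely by dynamic cluster settings, which is what the new tests exercise. Below is a minimal sketch using the high-level REST client, mirroring the settings keys used in RemoteClustersIT; the alias "my_remote", the proxy endpoint "haproxy.example:9600", and the SNI server name are illustrative placeholders, not values from this commit.

import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;

import java.io.IOException;

public class ProxyModeRemoteClusterSketch {
    // Registers a proxy-mode remote cluster: instead of sniffing the remote nodes'
    // publish addresses (which may not be routable), all connections go through a
    // single reachable endpoint such as an HAProxy front end.
    static void configureProxyRemote(RestHighLevelClient client) throws IOException {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings(Settings.builder()
            .put("cluster.remote.my_remote.mode", "proxy")                             // proxy mode instead of the default sniff mode
            .put("cluster.remote.my_remote.proxy_address", "haproxy.example:9600")     // the one address the local cluster can reach
            .put("cluster.remote.my_remote.server_name", "application1.example.com")   // optional TLS SNI, used by the proxy to pick a backend
            .build());
        client.cluster().putSettings(request, RequestOptions.DEFAULT);
    }
}

Searches can then target the remote with the usual "my_remote:index" syntax, as the tests below do with "cluster2:test2".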
@@ -0,0 +1,109 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
import org.elasticsearch.gradle.testfixtures.TestFixturesPlugin

apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.test.fixtures'

testFixtures.useFixture()

configurations {
  restSpec
}

dependencies {
  restSpec project(':rest-api-spec')
  testCompile project(':client:rest-high-level')
}

task copyKeystore(type: Sync) {
  from project(':x-pack:plugin:core')
    .file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks')
  into "${buildDir}/certs"
  doLast {
    file("${buildDir}/certs").setReadable(true, false)
    file("${buildDir}/certs/testnode.jks").setReadable(true, false)
  }
}

preProcessFixture {
  if (TestFixturesPlugin.dockerComposeSupported()) {
    if ('default'.equalsIgnoreCase(System.getProperty('tests.distribution', 'default'))) {
      dependsOn ":distribution:docker:buildDockerImage"
    } else {
      dependsOn ":distribution:docker:buildOssDockerImage"
    }
  }
  dependsOn copyKeystore
  doLast {
    // tests expect to have an empty repo
    project.delete(
      "${buildDir}/repo",
      "${buildDir}/oss-repo"
    )
    createAndSetWritable(
      "${buildDir}/repo",
      "${buildDir}/oss-repo",
      "${buildDir}/logs/default-1",
      "${buildDir}/logs/default-2",
      "${buildDir}/logs/oss-1",
      "${buildDir}/logs/oss-2"
    )
  }
}

if (TestFixturesPlugin.dockerComposeSupported()) {
  dockerCompose {
    tcpPortsToIgnoreWhenWaiting = [9600, 9601]
    if ('default'.equalsIgnoreCase(System.getProperty('tests.distribution', 'default'))) {
      useComposeFiles = ['docker-compose.yml']
    } else {
      useComposeFiles = ['docker-compose-oss.yml']
    }
  }
}

def createAndSetWritable(Object... locations) {
  locations.each { location ->
    File file = file(location)
    file.mkdirs()
    file.setWritable(true, false)
  }
}

processTestResources {
  from({ zipTree(configurations.restSpec.singleFile) }) {
    include 'rest-api-spec/api/**'
  }
  from project(':x-pack:plugin:core')
    .file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks')
  dependsOn configurations.restSpec
}

task integTest(type: Test) {
  outputs.doNotCacheIf('Build cache is disabled for Docker tests') { true }
  maxParallelForks = '1'
  include '**/*IT.class'
  // don't add the tasks to build the docker images if we have no way of testing them
  if (TestFixturesPlugin.dockerComposeSupported()) {
    dependsOn ":distribution:docker:buildDockerImage"
  }
}

check.dependsOn integTest
@@ -0,0 +1,76 @@
# Only used for testing the docker images
version: '3.7'
services:
  elasticsearch-oss-1:
    image: elasticsearch:test
    environment:
      - node.name=elasticsearch-oss-1
      - cluster.initial_master_nodes=elasticsearch-oss-1
      - cluster.name=elasticsearch-oss-1
      - bootstrap.memory_lock=true
      - network.publish_host=127.0.0.1
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - path.repo=/tmp/es-repo
      - node.attr.testattr=test
      - cluster.routing.allocation.disk.watermark.low=1b
      - cluster.routing.allocation.disk.watermark.high=1b
      - cluster.routing.allocation.disk.watermark.flood_stage=1b
      - script.max_compilations_rate=2048/1m
      - node.store.allow_mmap=false
    volumes:
      - ./build/oss-repo:/tmp/es-repo
      - ./build/logs/oss-1:/usr/share/elasticsearch/logs
    ports:
      - "9200"
      - "9300"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    healthcheck:
      start_period: 15s
      test: ["CMD", "curl", "-f", "http://localhost:9200"]
      interval: 10s
      timeout: 2s
      retries: 5
  elasticsearch-oss-2:
    image: elasticsearch:test
    environment:
      - node.name=elasticsearch-oss-2
      - cluster.initial_master_nodes=elasticsearch-oss-2
      - cluster.name=elasticsearch-oss-2
      - bootstrap.memory_lock=true
      - network.publish_host=127.0.0.1
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - path.repo=/tmp/es-repo
      - node.attr.testattr=test
      - cluster.routing.allocation.disk.watermark.low=1b
      - cluster.routing.allocation.disk.watermark.high=1b
      - cluster.routing.allocation.disk.watermark.flood_stage=1b
      - script.max_compilations_rate=2048/1m
      - node.store.allow_mmap=false
    volumes:
      - ./build/oss-repo:/tmp/es-repo
      - ./build/logs/oss-2:/usr/share/elasticsearch/logs
    ports:
      - "9200"
      - "9300"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      start_period: 15s
      test: ["CMD", "curl", "-f", "http://localhost:9200"]
      interval: 10s
      timeout: 2s
      retries: 5
  haproxy:
    image: haproxy:2.1.2
    ports:
      - "9600"
    volumes:
      - ./haproxy-oss.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
@@ -0,0 +1,109 @@
# Only used for testing the docker images
version: '3.7'
services:
  elasticsearch-default-1:
    image: elasticsearch:test
    environment:
      - node.name=elasticsearch-default-1
      - cluster.initial_master_nodes=elasticsearch-default-1
      - cluster.name=elasticsearch-default-1
      - bootstrap.memory_lock=true
      - network.publish_host=127.0.0.1
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - path.repo=/tmp/es-repo
      - node.attr.testattr=test
      - cluster.routing.allocation.disk.watermark.low=1b
      - cluster.routing.allocation.disk.watermark.high=1b
      - cluster.routing.allocation.disk.watermark.flood_stage=1b
      - script.max_compilations_rate=2048/1m
      - node.store.allow_mmap=false
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.http.ssl.enabled=true
      - xpack.security.authc.token.enabled=true
      - xpack.security.audit.enabled=true
      - xpack.security.authc.realms.file.file1.order=0
      - xpack.security.authc.realms.native.native1.order=1
      - xpack.security.transport.ssl.keystore.path=/usr/share/elasticsearch/config/testnode.jks
      - xpack.security.http.ssl.keystore.path=/usr/share/elasticsearch/config/testnode.jks
      - xpack.http.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.license.self_generated.type=trial
    volumes:
      - ./build/repo:/tmp/es-repo
      - ./build/certs/testnode.jks:/usr/share/elasticsearch/config/testnode.jks
      - ./build/logs/default-1:/usr/share/elasticsearch/logs
      - ./docker-test-entrypoint.sh:/docker-test-entrypoint.sh
    ports:
      - "9200"
      - "9300"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    entrypoint: /docker-test-entrypoint.sh
    healthcheck:
      start_period: 15s
      test: ["CMD", "curl", "-f", "-u", "x_pack_rest_user:x-pack-test-password", "-k", "https://localhost:9200"]
      interval: 10s
      timeout: 2s
      retries: 5
  elasticsearch-default-2:
    image: elasticsearch:test
    environment:
      - node.name=elasticsearch-default-2
      - cluster.initial_master_nodes=elasticsearch-default-2
      - cluster.name=elasticsearch-default-2
      - bootstrap.memory_lock=true
      - network.publish_host=127.0.0.1
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - path.repo=/tmp/es-repo
      - node.attr.testattr=test
      - cluster.routing.allocation.disk.watermark.low=1b
      - cluster.routing.allocation.disk.watermark.high=1b
      - cluster.routing.allocation.disk.watermark.flood_stage=1b
      - script.max_compilations_rate=2048/1m
      - node.store.allow_mmap=false
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.http.ssl.enabled=true
      - xpack.security.authc.token.enabled=true
      - xpack.security.audit.enabled=true
      - xpack.security.authc.realms.file.file1.order=0
      - xpack.security.authc.realms.native.native1.order=1
      - xpack.security.transport.ssl.keystore.path=/usr/share/elasticsearch/config/testnode.jks
      - xpack.security.http.ssl.keystore.path=/usr/share/elasticsearch/config/testnode.jks
      - xpack.http.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.license.self_generated.type=trial
    volumes:
      - ./build/repo:/tmp/es-repo
      - ./build/certs/testnode.jks:/usr/share/elasticsearch/config/testnode.jks
      - ./build/logs/default-2:/usr/share/elasticsearch/logs
      - ./docker-test-entrypoint.sh:/docker-test-entrypoint.sh
    ports:
      - "9200"
      - "9300"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    entrypoint: /docker-test-entrypoint.sh
    healthcheck:
      start_period: 15s
      test: ["CMD", "curl", "-f", "-u", "x_pack_rest_user:x-pack-test-password", "-k", "https://localhost:9200"]
      interval: 10s
      timeout: 2s
      retries: 5
  haproxy:
    image: haproxy:2.1.2
    ports:
      - "9600"
    volumes:
      - ./haproxy-default.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
@@ -0,0 +1,7 @@
#!/bin/bash
cd /usr/share/elasticsearch/bin/
./elasticsearch-users useradd x_pack_rest_user -p x-pack-test-password -r superuser || true
echo "testnode" > /tmp/password
cat /tmp/password | ./elasticsearch-keystore add -x -f -v 'xpack.security.transport.ssl.keystore.secure_password'
cat /tmp/password | ./elasticsearch-keystore add -x -f -v 'xpack.security.http.ssl.keystore.secure_password'
/usr/local/bin/docker-entrypoint.sh | tee > /usr/share/elasticsearch/logs/console.log
@@ -0,0 +1,27 @@
global
    log localhost local1 notice
    maxconn 2000
    daemon

defaults
    log global
    mode tcp
    retries 3
    option tcpka
    option tcp-check
    timeout client 30s
    timeout server 30s
    timeout connect 5s

frontend ft_ssl
    bind *:9600
    tcp-request inspect-delay 5s
    tcp-request content accept if { req_ssl_hello_type 1 }
    default_backend bk_ssl

backend bk_ssl
    use-server server1 if { req_ssl_sni -i application1.example.com }
    server server1 elasticsearch-default-1:9300 weight 0 check
    use-server server2 if { req_ssl_sni -i application2.example.com }
    server server2 elasticsearch-default-2:9300 weight 0 check
    server default elasticsearch-default-2:9300 check
@@ -0,0 +1,21 @@
global
    log localhost local1 notice
    maxconn 2000
    daemon

defaults
    log global
    mode tcp
    retries 3
    option tcpka
    option tcp-check
    timeout client 30s
    timeout server 30s
    timeout connect 5s

frontend ft_reg
    bind *:9600
    default_backend bk_reg

backend bk_reg
    server default elasticsearch-oss-2:9300 check
@@ -0,0 +1,192 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.cluster.remote.test;

import org.apache.http.HttpHost;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.CharArrays;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;

import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.CharBuffer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collections;

public abstract class AbstractMultiClusterRemoteTestCase extends ESRestTestCase {

    private static final String USER = "x_pack_rest_user";
    private static final String PASS = "x-pack-test-password";
    private static final String KEYSTORE_PASS = "testnode";

    @Override
    protected boolean preserveClusterUponCompletion() {
        return true;
    }

    private static RestHighLevelClient cluster1Client;
    private static RestHighLevelClient cluster2Client;
    private static boolean initialized = false;

    @Override
    protected String getTestRestCluster() {
        return "localhost:" + getProperty("test.fixtures.elasticsearch-" + getDistribution() + "-1.tcp.9200");
    }

    @Before
    public void initClientsAndConfigureClusters() throws Exception {
        if (initialized) {
            return;
        }

        cluster1Client = buildClient("localhost:" + getProperty("test.fixtures.elasticsearch-" + getDistribution() + "-1.tcp.9200"));
        cluster2Client = buildClient("localhost:" + getProperty("test.fixtures.elasticsearch-" + getDistribution() + "-2.tcp.9200"));

        cluster1Client().cluster().health(new ClusterHealthRequest().waitForNodes("1").waitForYellowStatus(), RequestOptions.DEFAULT);
        cluster2Client().cluster().health(new ClusterHealthRequest().waitForNodes("1").waitForYellowStatus(), RequestOptions.DEFAULT);

        initialized = true;
    }

    protected String getDistribution() {
        String distribution = System.getProperty("tests.distribution", "default");
        if (distribution.equals("oss") == false && distribution.equals("default") == false) {
            throw new IllegalArgumentException("supported values for tests.distribution are oss or default but it was " + distribution);
        }
        return distribution;
    }

    @AfterClass
    public static void destroyClients() throws IOException {
        try {
            IOUtils.close(cluster1Client, cluster2Client);
        } finally {
            cluster1Client = null;
            cluster2Client = null;
        }
    }

    protected static RestHighLevelClient cluster1Client() {
        return cluster1Client;
    }

    protected static RestHighLevelClient cluster2Client() {
        return cluster2Client;
    }

    private static class HighLevelClient extends RestHighLevelClient {
        private HighLevelClient(RestClient restClient) {
            super(restClient, RestClient::close, Collections.emptyList());
        }
    }

    private RestHighLevelClient buildClient(final String url) throws IOException {
        int portSeparator = url.lastIndexOf(':');
        HttpHost httpHost = new HttpHost(url.substring(0, portSeparator),
            Integer.parseInt(url.substring(portSeparator + 1)), getProtocol());
        return new HighLevelClient(buildClient(restAdminSettings(), new HttpHost[]{httpHost}));
    }

    protected boolean isOss() {
        return getDistribution().equals("oss");
    }

    static Path keyStore;

    @BeforeClass
    public static void getKeyStore() {
        try {
            keyStore = PathUtils.get(AbstractMultiClusterRemoteTestCase.class.getResource("/testnode.jks").toURI());
        } catch (URISyntaxException e) {
            throw new ElasticsearchException("exception while reading the store", e);
        }
        if (Files.exists(keyStore) == false) {
            throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist.");
        }
    }

    @AfterClass
    public static void clearKeyStore() {
        keyStore = null;
    }

    @Override
    protected Settings restClientSettings() {
        if (isOss()) {
            return super.restClientSettings();
        }
        String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray()));
        return Settings.builder()
            .put(ThreadContext.PREFIX + ".Authorization", token)
            .put(ESRestTestCase.TRUSTSTORE_PATH, keyStore)
            .put(ESRestTestCase.TRUSTSTORE_PASSWORD, KEYSTORE_PASS)
            .build();
    }

    @Override
    protected String getProtocol() {
        if (isOss()) {
            return "http";
        }
        return "https";
    }

    private static String basicAuthHeaderValue(String username, SecureString passwd) {
        CharBuffer chars = CharBuffer.allocate(username.length() + passwd.length() + 1);
        byte[] charBytes = null;
        try {
            chars.put(username).put(':').put(passwd.getChars());
            charBytes = CharArrays.toUtf8Bytes(chars.array());

            //TODO we still have passwords in Strings in headers. Maybe we can look into using a CharSequence?
            String basicToken = Base64.getEncoder().encodeToString(charBytes);
            return "Basic " + basicToken;
        } finally {
            Arrays.fill(chars.array(), (char) 0);
            if (charBytes != null) {
                Arrays.fill(charBytes, (byte) 0);
            }
        }
    }

    private String getProperty(String key) {
        String value = System.getProperty(key);
        if (value == null) {
            throw new IllegalStateException("Could not find system properties from test.fixtures. " +
                "This test expects to run with the elasticsearch.test.fixtures Gradle plugin");
        }
        return value;
    }
}
@@ -0,0 +1,155 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.cluster.remote.test;

import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.cluster.RemoteConnectionInfo;
import org.elasticsearch.client.cluster.RemoteInfoRequest;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.junit.After;
import org.junit.Before;

import java.io.IOException;

import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assume.assumeThat;

public class RemoteClustersIT extends AbstractMultiClusterRemoteTestCase {

    @Before
    public void setupIndices() throws IOException {
        assertTrue(cluster1Client().indices().create(new CreateIndexRequest("test1").settings(Settings.builder()
            .put("index.number_of_replicas", 0).build()), RequestOptions.DEFAULT).isAcknowledged());
        cluster1Client().index(new IndexRequest("test1").id("id1").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
            .source(XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()), RequestOptions.DEFAULT);
        assertTrue(cluster2Client().indices().create(new CreateIndexRequest("test2").settings(Settings.builder()
            .put("index.number_of_replicas", 0).build()), RequestOptions.DEFAULT).isAcknowledged());
        cluster2Client().index(new IndexRequest("test2").id("id1")
            .source(XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()), RequestOptions.DEFAULT);
        cluster2Client().index(new IndexRequest("test2").id("id2").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
            .source(XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()), RequestOptions.DEFAULT);
        assertEquals(1L, cluster1Client().search(new SearchRequest("test1"), RequestOptions.DEFAULT).getHits().getTotalHits().value);
        assertEquals(2L, cluster2Client().search(new SearchRequest("test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value);
    }

    @After
    public void clearIndices() throws IOException {
        assertTrue(cluster1Client().indices().delete(new DeleteIndexRequest("*"), RequestOptions.DEFAULT).isAcknowledged());
        assertTrue(cluster2Client().indices().delete(new DeleteIndexRequest("*"), RequestOptions.DEFAULT).isAcknowledged());
    }

    @After
    public void clearRemoteClusterSettings() throws IOException {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings(
            Settings.builder().putNull("cluster.remote.*").build());
        assertTrue(cluster1Client().cluster().putSettings(request, RequestOptions.DEFAULT).isAcknowledged());
        assertTrue(cluster2Client().cluster().putSettings(request, RequestOptions.DEFAULT).isAcknowledged());
    }

    public void testProxyModeConnectionWorks() throws IOException {
        String cluster2RemoteClusterSeed = "elasticsearch-" + getDistribution() + "-2:9300";
        logger.info("Configuring remote cluster [{}]", cluster2RemoteClusterSeed);
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings(Settings.builder()
            .put("cluster.remote.cluster2.mode", "proxy")
            .put("cluster.remote.cluster2.proxy_address", cluster2RemoteClusterSeed)
            .build());
        assertTrue(cluster1Client().cluster().putSettings(request, RequestOptions.DEFAULT).isAcknowledged());

        RemoteConnectionInfo rci = cluster1Client().cluster().remoteInfo(new RemoteInfoRequest(), RequestOptions.DEFAULT).getInfos().get(0);
        logger.info("Connection info: {}", rci);
        assertTrue(rci.isConnected());

        assertEquals(2L, cluster1Client().search(
            new SearchRequest("cluster2:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value);
    }

    public void testSniffModeConnectionFails() throws IOException {
        String cluster2RemoteClusterSeed = "elasticsearch-" + getDistribution() + "-2:9300";
        logger.info("Configuring remote cluster [{}]", cluster2RemoteClusterSeed);
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings(Settings.builder()
            .put("cluster.remote.cluster2alt.mode", "sniff")
            .put("cluster.remote.cluster2alt.seeds", cluster2RemoteClusterSeed)
            .build());
        assertTrue(cluster1Client().cluster().putSettings(request, RequestOptions.DEFAULT).isAcknowledged());

        RemoteConnectionInfo rci = cluster1Client().cluster().remoteInfo(new RemoteInfoRequest(), RequestOptions.DEFAULT).getInfos().get(0);
        logger.info("Connection info: {}", rci);
        assertFalse(rci.isConnected());
    }

    public void testHAProxyModeConnectionWorks() throws IOException {
        String proxyAddress = "haproxy:9600";
        logger.info("Configuring remote cluster [{}]", proxyAddress);
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings(Settings.builder()
            .put("cluster.remote.haproxynosn.mode", "proxy")
            .put("cluster.remote.haproxynosn.proxy_address", proxyAddress)
            .build());
        assertTrue(cluster1Client().cluster().putSettings(request, RequestOptions.DEFAULT).isAcknowledged());

        RemoteConnectionInfo rci = cluster1Client().cluster().remoteInfo(new RemoteInfoRequest(), RequestOptions.DEFAULT).getInfos().get(0);
        logger.info("Connection info: {}", rci);
        assertTrue(rci.isConnected());

        assertEquals(2L, cluster1Client().search(
            new SearchRequest("haproxynosn:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value);
    }

    public void testHAProxyModeConnectionWithSNIToCluster1Works() throws IOException {
        assumeThat("test is only supported if the distribution contains xpack", getDistribution(), equalTo("default"));

        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings(Settings.builder()
            .put("cluster.remote.haproxysni1.mode", "proxy")
            .put("cluster.remote.haproxysni1.proxy_address", "haproxy:9600")
            .put("cluster.remote.haproxysni1.server_name", "application1.example.com")
            .build());
        assertTrue(cluster2Client().cluster().putSettings(request, RequestOptions.DEFAULT).isAcknowledged());

        RemoteConnectionInfo rci = cluster2Client().cluster().remoteInfo(new RemoteInfoRequest(), RequestOptions.DEFAULT).getInfos().get(0);
        logger.info("Connection info: {}", rci);
        assertTrue(rci.isConnected());

        assertEquals(1L, cluster2Client().search(
            new SearchRequest("haproxysni1:test1"), RequestOptions.DEFAULT).getHits().getTotalHits().value);
    }

    public void testHAProxyModeConnectionWithSNIToCluster2Works() throws IOException {
        assumeThat("test is only supported if the distribution contains xpack", getDistribution(), equalTo("default"));

        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings(Settings.builder()
            .put("cluster.remote.haproxysni2.mode", "proxy")
            .put("cluster.remote.haproxysni2.proxy_address", "haproxy:9600")
            .put("cluster.remote.haproxysni2.server_name", "application2.example.com")
            .build());
        assertTrue(cluster1Client().cluster().putSettings(request, RequestOptions.DEFAULT).isAcknowledged());

        RemoteConnectionInfo rci = cluster1Client().cluster().remoteInfo(new RemoteInfoRequest(), RequestOptions.DEFAULT).getInfos().get(0);
        logger.info("Connection info: {}", rci);
        assertTrue(rci.isConnected());

        assertEquals(2L, cluster1Client().search(
            new SearchRequest("haproxysni2:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value);
    }
}