Remove deprecated transport client (#1781)
This commit removes all usage of transport client in OpenSearch.

Signed-off-by: Rabi Panda <adnapibar@gmail.com>
parent 24d0c9b818
commit 7c73ed8d4c
@@ -260,7 +260,7 @@ yamlRestTest’s and javaRestTest’s are easy to identify, since they are found
 If in doubt about which command to use, simply run <gradle path>:check

-Note that the REST tests, like all the integration tests, can be run against an external cluster by specifying the `tests.cluster` property, which if present needs to contain a comma separated list of nodes to connect to (e.g. localhost:9300). A transport client will be created based on that and used for all the before|after test operations, and to extract the http addresses of the nodes so that REST requests can be sent to them.
+Note that the REST tests, like all the integration tests, can be run against an external cluster by specifying the `tests.cluster` property, which if present needs to contain a comma separated list of nodes to connect to (e.g. localhost:9300).

 # Testing packaging
@@ -93,7 +93,7 @@ import static java.util.Collections.emptyList;
 @State(Scope.Thread)
 @Fork(value = 1)
 public class TermsReduceBenchmark {
-    private final SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+    private final SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
     private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
     private final SearchPhaseController controller = new SearchPhaseController(
         namedWriteableRegistry,
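The old three-argument SearchModule constructor took a boolean that flagged transport-client usage; with the transport client gone, every caller in this commit moves to the two-argument form. A minimal sketch of how the registries are typically built from it, following the pattern visible in the hunks of this diff (the RegistryFactory class name is illustrative, not part of the change):

import static java.util.Collections.emptyList;

import org.opensearch.common.io.stream.NamedWriteableRegistry;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.xcontent.NamedXContentRegistry;
import org.opensearch.search.SearchModule;

public final class RegistryFactory {
    // Build the writeable registry the same way the updated benchmarks and tests do.
    public static NamedWriteableRegistry writeables() {
        SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
        return new NamedWriteableRegistry(searchModule.getNamedWriteables());
    }

    // Build the xcontent registry from the same module instance pattern.
    public static NamedXContentRegistry xContents() {
        SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
        return new NamedXContentRegistry(searchModule.getNamedXContents());
    }
}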
@@ -34,7 +34,7 @@ gradlew -p client/benchmark run --args ' rest bulk localhost build/documents-2.j

 The parameters are all in the `'`s and are in order:

-* Client type: Use either "rest" or "transport"
+* Client type: Use "rest"
 * Benchmark type: Use either "bulk" or "search"
 * Benchmark target host IP (the host where OpenSearch is running)
 * full path to the file that should be bulk indexed
@@ -54,9 +54,9 @@ Example invocation:

 The parameters are in order:

-* Client type: Use either "rest" or "transport"
+* Client type: Use "rest"
 * Benchmark type: Use either "bulk" or "search"
 * Benchmark target host IP (the host where OpenSearch is running)
 * name of the index
-* a search request body (remember to escape double quotes). The `TransportClientBenchmark` uses `QueryBuilders.wrapperQuery()` internally which automatically adds a root key `query`, so it must not be present in the command line parameter.
+* a search request body (remember to escape double quotes).
 * A comma-separated list of target throughput rates
@@ -48,13 +48,7 @@ dependencies {
   api project(":client:rest")
   // bottleneck should be the client, not OpenSearch
   api project(path: ':client:client-benchmark-noop-api-plugin')
-  // for transport client
-  api project(":server")
-  api project(":client:transport")
-  api project(':modules:transport-netty4')
-  api project(':modules:reindex')
-  api project(':modules:lang-mustache')
-  api project(':modules:percolator')
 }

 // No licenses for our benchmark deps (we don't ship benchmarks)
@@ -32,7 +32,6 @@
 package org.opensearch.client.benchmark;

 import org.opensearch.client.benchmark.rest.RestClientBenchmark;
-import org.opensearch.client.benchmark.transport.TransportClientBenchmark;
 import org.opensearch.common.SuppressForbidden;

 import java.util.Arrays;
@@ -42,14 +41,9 @@ public class BenchmarkMain {
     public static void main(String[] args) throws Exception {
         String type = args[0];
         AbstractBenchmark<?> benchmark = null;
-        switch (type) {
-            case "transport":
-                benchmark = new TransportClientBenchmark();
-                break;
-            case "rest":
+        if ("rest".equals(type)) {
             benchmark = new RestClientBenchmark();
-                break;
-            default:
+        } else {
             System.err.println("Unknown client type [" + type + "]");
             System.exit(1);
         }
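Because the old switch and the new if/else are interleaved in the hunk above, the consolidated result is easier to read in one piece. A hedged sketch of the resulting dispatcher, assuming the class structure from the diff; the final hand-off to run(...) is not shown in this hunk and is an assumption here:

package org.opensearch.client.benchmark;

import java.util.Arrays;

import org.opensearch.client.benchmark.rest.RestClientBenchmark;

public class BenchmarkMainSketch {
    public static void main(String[] args) throws Exception {
        String type = args[0];
        AbstractBenchmark<?> benchmark = null;
        if ("rest".equals(type)) {
            benchmark = new RestClientBenchmark();
        } else {
            // any other client type, including the removed "transport", is now rejected
            System.err.println("Unknown client type [" + type + "]");
            System.exit(1);
        }
        // assumption: the remaining arguments are handed to the benchmark framework
        benchmark.run(Arrays.copyOfRange(args, 1, args.length));
    }
}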
@ -1,140 +0,0 @@
|
|||
/*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* The OpenSearch Contributors require contributions made to
|
||||
* this file be licensed under the Apache-2.0 license or a
|
||||
* compatible open source license.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
/*
|
||||
* Modifications Copyright OpenSearch Contributors. See
|
||||
* GitHub history for details.
|
||||
*/
|
||||
|
||||
package org.opensearch.client.benchmark.transport;
|
||||
|
||||
import org.opensearch.OpenSearchException;
|
||||
import org.opensearch.action.bulk.BulkRequest;
|
||||
import org.opensearch.action.bulk.BulkResponse;
|
||||
import org.opensearch.action.index.IndexRequest;
|
||||
import org.opensearch.action.search.SearchRequest;
|
||||
import org.opensearch.action.search.SearchResponse;
|
||||
import org.opensearch.client.benchmark.AbstractBenchmark;
|
||||
import org.opensearch.client.benchmark.ops.bulk.BulkRequestExecutor;
|
||||
import org.opensearch.client.benchmark.ops.search.SearchRequestExecutor;
|
||||
import org.opensearch.client.transport.TransportClient;
|
||||
import org.opensearch.common.settings.Settings;
|
||||
import org.opensearch.common.transport.TransportAddress;
|
||||
import org.opensearch.common.xcontent.XContentType;
|
||||
import org.opensearch.index.query.QueryBuilders;
|
||||
import org.opensearch.plugin.noop.NoopPlugin;
|
||||
import org.opensearch.plugin.noop.action.bulk.NoopBulkAction;
|
||||
import org.opensearch.plugin.noop.action.search.NoopSearchAction;
|
||||
import org.opensearch.rest.RestStatus;
|
||||
import org.opensearch.search.builder.SearchSourceBuilder;
|
||||
import org.opensearch.transport.client.PreBuiltTransportClient;
|
||||
|
||||
import java.net.InetAddress;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
||||
public final class TransportClientBenchmark extends AbstractBenchmark<TransportClient> {
|
||||
public static void main(String[] args) throws Exception {
|
||||
TransportClientBenchmark benchmark = new TransportClientBenchmark();
|
||||
benchmark.run(args);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected TransportClient client(String benchmarkTargetHost) throws Exception {
|
||||
TransportClient client = new PreBuiltTransportClient(Settings.EMPTY, NoopPlugin.class);
|
||||
client.addTransportAddress(new TransportAddress(InetAddress.getByName(benchmarkTargetHost), 9300));
|
||||
return client;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BulkRequestExecutor bulkRequestExecutor(TransportClient client, String indexName, String typeName) {
|
||||
return new TransportBulkRequestExecutor(client, indexName, typeName);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected SearchRequestExecutor searchRequestExecutor(TransportClient client, String indexName) {
|
||||
return new TransportSearchRequestExecutor(client, indexName);
|
||||
}
|
||||
|
||||
private static final class TransportBulkRequestExecutor implements BulkRequestExecutor {
|
||||
private final TransportClient client;
|
||||
private final String indexName;
|
||||
private final String typeName;
|
||||
|
||||
TransportBulkRequestExecutor(TransportClient client, String indexName, String typeName) {
|
||||
this.client = client;
|
||||
this.indexName = indexName;
|
||||
this.typeName = typeName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean bulkIndex(List<String> bulkData) {
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
for (String bulkItem : bulkData) {
|
||||
bulkRequest.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8), XContentType.JSON));
|
||||
}
|
||||
BulkResponse bulkResponse;
|
||||
try {
|
||||
bulkResponse = client.execute(NoopBulkAction.INSTANCE, bulkRequest).get();
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
return false;
|
||||
} catch (ExecutionException e) {
|
||||
throw new OpenSearchException(e);
|
||||
}
|
||||
return !bulkResponse.hasFailures();
|
||||
}
|
||||
}
|
||||
|
||||
private static final class TransportSearchRequestExecutor implements SearchRequestExecutor {
|
||||
private final TransportClient client;
|
||||
private final String indexName;
|
||||
|
||||
private TransportSearchRequestExecutor(TransportClient client, String indexName) {
|
||||
this.client = client;
|
||||
this.indexName = indexName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean search(String source) {
|
||||
final SearchResponse response;
|
||||
try {
|
||||
final SearchRequest searchRequest = new SearchRequest(indexName);
|
||||
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
|
||||
searchRequest.source(searchSourceBuilder);
|
||||
searchSourceBuilder.query(QueryBuilders.wrapperQuery(source));
|
||||
response = client.execute(NoopSearchAction.INSTANCE, searchRequest).get();
|
||||
return response.status() == RestStatus.OK;
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
return false;
|
||||
} catch (ExecutionException e) {
|
||||
throw new OpenSearchException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
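The file deleted above drove the noop bulk and search actions through a TransportClient on port 9300. The surviving path does the equivalent over HTTP with the low-level REST client. A hedged sketch of a bulk round trip under that model; the host, port, index name, and the generic /_bulk endpoint are illustrative (the actual RestClientBenchmark targets the noop plugin's own endpoints):

import java.util.Arrays;
import java.util.List;

import org.apache.http.HttpHost;
import org.opensearch.client.Request;
import org.opensearch.client.Response;
import org.opensearch.client.RestClient;

public final class RestBulkSketch {
    // Send one bulk request over HTTP, roughly what the remaining REST benchmark path does.
    static boolean bulkIndex(RestClient client, String indexName, List<String> bulkData) throws Exception {
        StringBuilder body = new StringBuilder();
        for (String bulkItem : bulkData) {
            // action metadata line followed by the document source, newline-delimited
            body.append("{\"index\":{\"_index\":\"").append(indexName).append("\"}}\n");
            body.append(bulkItem).append("\n");
        }
        Request request = new Request("POST", "/_bulk");
        request.setJsonEntity(body.toString());
        Response response = client.performRequest(request);
        return response.getStatusLine().getStatusCode() == 200;
    }

    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            bulkIndex(client, "benchmark-index", Arrays.asList("{\"foo\":\"bar\"}"));
        }
    }
}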
@@ -547,7 +547,6 @@ final class IndicesRequestConverters {
      */
     @Deprecated
     static Request indicesExist(org.opensearch.action.admin.indices.get.GetIndexRequest getIndexRequest) {
-        // this can be called with no indices as argument by transport client, not via REST though
         if (getIndexRequest.indices() == null || getIndexRequest.indices().length == 0) {
             throw new IllegalArgumentException("indices are mandatory");
         }
@@ -565,7 +564,6 @@ final class IndicesRequestConverters {
     }

     static Request indicesExist(GetIndexRequest getIndexRequest) {
-        // this can be called with no indices as argument by transport client, not via REST though
        if (getIndexRequest.indices() == null || getIndexRequest.indices().length == 0) {
            throw new IllegalArgumentException("indices are mandatory");
        }
@@ -176,7 +176,7 @@ public abstract class OpenSearchRestHighLevelClientTestCase extends OpenSearchRe

     private static class HighLevelClient extends RestHighLevelClient {
         private HighLevelClient(RestClient restClient) {
-            super(restClient, (client) -> {}, new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents());
+            super(restClient, (client) -> {}, new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents());
         }
     }

@@ -81,7 +81,7 @@ public class CountRequestTests extends AbstractRequestTestCase<CountRequest, Que

     @Override
     protected NamedXContentRegistry xContentRegistry() {
-        return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents());
+        return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents());
     }

     public void testIllegalArguments() {
@ -1,61 +0,0 @@
|
|||
/*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* The OpenSearch Contributors require contributions made to
|
||||
* this file be licensed under the Apache-2.0 license or a
|
||||
* compatible open source license.
|
||||
*
|
||||
* Modifications Copyright OpenSearch Contributors. See
|
||||
* GitHub history for details.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
apply plugin: 'opensearch.build'
|
||||
apply plugin: 'nebula.maven-base-publish'
|
||||
|
||||
group = 'org.opensearch.client'
|
||||
|
||||
dependencies {
|
||||
api project(":server")
|
||||
api project(":modules:transport-netty4")
|
||||
api project(":modules:reindex")
|
||||
api project(":modules:lang-mustache")
|
||||
api project(":modules:percolator")
|
||||
api project(":modules:parent-join")
|
||||
api project(":modules:rank-eval")
|
||||
testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
|
||||
testImplementation "junit:junit:${versions.junit}"
|
||||
testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}"
|
||||
}
|
||||
|
||||
forbiddenApisTest {
|
||||
// we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to
|
||||
// be pulled in
|
||||
replaceSignatureFiles 'jdk-signatures', 'opensearch-all-signatures'
|
||||
}
|
||||
|
||||
testingConventions {
|
||||
naming.clear()
|
||||
naming {
|
||||
Tests {
|
||||
baseClass 'com.carrotsearch.randomizedtesting.RandomizedTest'
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,159 +0,0 @@
|
|||
/*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* The OpenSearch Contributors require contributions made to
|
||||
* this file be licensed under the Apache-2.0 license or a
|
||||
* compatible open source license.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Modifications Copyright OpenSearch Contributors. See
|
||||
* GitHub history for details.
|
||||
*/
|
||||
|
||||
package org.opensearch.transport.client;
|
||||
|
||||
import io.netty.util.ThreadDeathWatcher;
|
||||
import io.netty.util.concurrent.GlobalEventExecutor;
|
||||
|
||||
import org.opensearch.client.transport.TransportClient;
|
||||
import org.opensearch.common.SuppressForbidden;
|
||||
import org.opensearch.common.network.NetworkModule;
|
||||
import org.opensearch.common.settings.Settings;
|
||||
import org.opensearch.index.reindex.ReindexPlugin;
|
||||
import org.opensearch.join.ParentJoinPlugin;
|
||||
import org.opensearch.percolator.PercolatorPlugin;
|
||||
import org.opensearch.plugins.Plugin;
|
||||
import org.opensearch.script.mustache.MustachePlugin;
|
||||
import org.opensearch.transport.Netty4Plugin;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* A builder to create an instance of {@link TransportClient}. This class pre-installs the
|
||||
* {@link Netty4Plugin},
|
||||
* {@link ReindexPlugin},
|
||||
* {@link PercolatorPlugin},
|
||||
* {@link MustachePlugin},
|
||||
* {@link ParentJoinPlugin}
|
||||
* plugins for the client. These plugins are all the required modules for OpenSearch.
|
||||
*
|
||||
* @deprecated {@link TransportClient} is deprecated in favour of the High Level REST client.
|
||||
*/
|
||||
@SuppressWarnings({ "unchecked", "varargs" })
|
||||
@Deprecated
|
||||
public class PreBuiltTransportClient extends TransportClient {
|
||||
|
||||
static {
|
||||
// initialize Netty system properties before triggering any Netty class loads
|
||||
initializeNetty();
|
||||
}
|
||||
|
||||
/**
|
||||
* Netty wants to do some unwelcome things like use unsafe and replace a private field, or use a poorly considered buffer recycler. This
|
||||
* method disables these things by default, but can be overridden by setting the corresponding system properties.
|
||||
*/
|
||||
private static void initializeNetty() {
|
||||
/*
|
||||
* We disable three pieces of Netty functionality here:
|
||||
* - we disable Netty from being unsafe
|
||||
* - we disable Netty from replacing the selector key set
|
||||
* - we disable Netty from using the recycler
|
||||
*
|
||||
* While permissions are needed to read and set these, the permissions needed here are innocuous and thus should simply be granted
|
||||
* rather than us handling a security exception here.
|
||||
*/
|
||||
setSystemPropertyIfUnset("io.netty.noUnsafe", Boolean.toString(true));
|
||||
setSystemPropertyIfUnset("io.netty.noKeySetOptimization", Boolean.toString(true));
|
||||
setSystemPropertyIfUnset("io.netty.recycler.maxCapacityPerThread", Integer.toString(0));
|
||||
}
|
||||
|
||||
@SuppressForbidden(reason = "set system properties to configure Netty")
|
||||
private static void setSystemPropertyIfUnset(final String key, final String value) {
|
||||
final String currentValue = System.getProperty(key);
|
||||
if (currentValue == null) {
|
||||
System.setProperty(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
private static final Collection<Class<? extends Plugin>> PRE_INSTALLED_PLUGINS = Collections.unmodifiableList(
|
||||
Arrays.asList(Netty4Plugin.class, ReindexPlugin.class, PercolatorPlugin.class, MustachePlugin.class, ParentJoinPlugin.class)
|
||||
);
|
||||
|
||||
/**
|
||||
* Creates a new transport client with pre-installed plugins.
|
||||
*
|
||||
* @param settings the settings passed to this transport client
|
||||
* @param plugins an optional array of additional plugins to run with this client
|
||||
*/
|
||||
@SafeVarargs
|
||||
public PreBuiltTransportClient(Settings settings, Class<? extends Plugin>... plugins) {
|
||||
this(settings, Arrays.asList(plugins));
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new transport client with pre-installed plugins.
|
||||
*
|
||||
* @param settings the settings passed to this transport client
|
||||
* @param plugins a collection of additional plugins to run with this client
|
||||
*/
|
||||
public PreBuiltTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
|
||||
this(settings, plugins, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new transport client with pre-installed plugins.
|
||||
*
|
||||
* @param settings the settings passed to this transport client
|
||||
* @param plugins a collection of additional plugins to run with this client
|
||||
* @param hostFailureListener a failure listener that is invoked if a node is disconnected; this can be <code>null</code>
|
||||
*/
|
||||
public PreBuiltTransportClient(
|
||||
Settings settings,
|
||||
Collection<Class<? extends Plugin>> plugins,
|
||||
HostFailureListener hostFailureListener
|
||||
) {
|
||||
super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS), hostFailureListener);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
super.close();
|
||||
if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) == false
|
||||
|| NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(Netty4Plugin.NETTY_TRANSPORT_NAME)) {
|
||||
try {
|
||||
GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
try {
|
||||
ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -1,84 +0,0 @@
|
|||
/*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* The OpenSearch Contributors require contributions made to
|
||||
* this file be licensed under the Apache-2.0 license or a
|
||||
* compatible open source license.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Modifications Copyright OpenSearch Contributors. See
|
||||
* GitHub history for details.
|
||||
*/
|
||||
|
||||
package org.opensearch.transport.client;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.RandomizedTest;
|
||||
import org.opensearch.client.transport.TransportClient;
|
||||
import org.opensearch.common.network.NetworkModule;
|
||||
import org.opensearch.common.settings.Settings;
|
||||
import org.opensearch.index.reindex.ReindexPlugin;
|
||||
import org.opensearch.join.ParentJoinPlugin;
|
||||
import org.opensearch.percolator.PercolatorPlugin;
|
||||
import org.opensearch.plugins.Plugin;
|
||||
import org.opensearch.script.mustache.MustachePlugin;
|
||||
import org.opensearch.transport.Netty4Plugin;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
public class PreBuiltTransportClientTests extends RandomizedTest {
|
||||
|
||||
@Test
|
||||
public void testPluginInstalled() {
|
||||
try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
|
||||
Settings settings = client.settings();
|
||||
assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
|
||||
assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInstallPluginTwice() {
|
||||
for (Class<? extends Plugin> plugin : Arrays.asList(
|
||||
ParentJoinPlugin.class,
|
||||
ReindexPlugin.class,
|
||||
PercolatorPlugin.class,
|
||||
MustachePlugin.class
|
||||
)) {
|
||||
try {
|
||||
new PreBuiltTransportClient(Settings.EMPTY, plugin);
|
||||
fail("exception expected");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
assertTrue(
|
||||
"Expected message to start with [plugin already exists: ] but was instead [" + ex.getMessage() + "]",
|
||||
ex.getMessage().startsWith("plugin already exists: ")
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@@ -93,7 +93,6 @@ configure([
   project(":client:client-benchmark-noop-api-plugin"),
   project(":client:rest-high-level"),
   project(":client:test"),
-  project(":client:transport"),
   project(":distribution:tools:java-version-checker"),
   project(":distribution:tools:keystore-cli"),
   project(":distribution:tools:launchers"),
@@ -86,12 +86,12 @@ public class PainlessExecuteRequestTests extends AbstractWireSerializingTestCase

     @Override
     protected NamedWriteableRegistry getNamedWriteableRegistry() {
-        return new NamedWriteableRegistry(new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedWriteables());
+        return new NamedWriteableRegistry(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables());
     }

     @Override
     protected NamedXContentRegistry xContentRegistry() {
-        return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents());
+        return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents());
     }

     @Override
@@ -70,13 +70,13 @@ public class QueryBuilderStoreTests extends OpenSearchTestCase {

     @Override
     protected NamedWriteableRegistry writableRegistry() {
-        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
+        SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
         return new NamedWriteableRegistry(searchModule.getNamedWriteables());
     }

     @Override
     protected NamedXContentRegistry xContentRegistry() {
-        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
+        SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
         return new NamedXContentRegistry(searchModule.getNamedXContents());
     }

@@ -77,7 +77,7 @@ public class RatedRequestsTests extends OpenSearchTestCase {
     @BeforeClass
     public static void init() {
         xContentRegistry = new NamedXContentRegistry(
-            Stream.of(new SearchModule(Settings.EMPTY, false, emptyList()).getNamedXContents().stream())
+            Stream.of(new SearchModule(Settings.EMPTY, emptyList()).getNamedXContents().stream())
                 .flatMap(Function.identity())
                 .collect(toList())
         );
@@ -58,7 +58,7 @@ import static org.hamcrest.Matchers.hasSize;
  * This test checks that in-flight requests are limited on HTTP level and that requests that are excluded from limiting can pass.
  *
  * As the same setting is also used to limit in-flight requests on transport level, we avoid transport messages by forcing
- * a single node "cluster". We also force test infrastructure to use the node client instead of the transport client for the same reason.
+ * a single node "cluster".
  */
 @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1)
 public class Netty4HttpRequestSizeLimitIT extends OpenSearchNetty4IntegTestCase {
@@ -42,7 +42,7 @@ import org.opensearch.test.OpenSearchIntegTestCase.Scope;
 import static org.hamcrest.CoreMatchers.is;

 /**
- * Just an empty Node Start test to check eveything if fine when
+ * Just an empty Node Start test to check everything if fine when
  * starting.
  * This test requires AWS to run.
  */
@@ -209,7 +209,7 @@ public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase {
                 assertEquals(201, rsp.getStatusLine().getStatusCode());
             }
         } else {
-            NamedWriteableRegistry registry = new NamedWriteableRegistry(new SearchModule(Settings.EMPTY, false,
+            NamedWriteableRegistry registry = new NamedWriteableRegistry(new SearchModule(Settings.EMPTY,
                 Collections.emptyList()).getNamedWriteables());

             for (int i = 0; i < CANDIDATES.size(); i++) {
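The registry built in this hunk is what lets the BWC test read query builders serialized by an older node. A small hedged sketch of that read path; only the SearchModule/NamedWriteableRegistry wiring comes from the diff, and the class and method names here are illustrative (the real test also sets the stream version before reading):

import java.util.Collections;

import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.opensearch.common.io.stream.NamedWriteableRegistry;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.search.SearchModule;

final class QueryBuilderWireSketch {
    // Deserialize a QueryBuilder from raw bytes using the registry assembled from SearchModule.
    static QueryBuilder readQuery(byte[] bytes) throws Exception {
        NamedWriteableRegistry registry = new NamedWriteableRegistry(
            new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables());
        try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), registry)) {
            return in.readNamedWriteable(QueryBuilder.class);
        }
    }
}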
@ -1,64 +0,0 @@
|
|||
/*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* The OpenSearch Contributors require contributions made to
|
||||
* this file be licensed under the Apache-2.0 license or a
|
||||
* compatible open source license.
|
||||
*
|
||||
* Modifications Copyright OpenSearch Contributors. See
|
||||
* GitHub history for details.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import org.opensearch.gradle.test.RestIntegTestTask
|
||||
|
||||
apply plugin: 'opensearch.standalone-rest-test'
|
||||
apply plugin: 'opensearch.rest-test'
|
||||
|
||||
// TODO: this test works, but it isn't really a rest test...should we have another plugin for "non rest test that just needs N clusters?"
|
||||
|
||||
dependencies {
|
||||
testImplementation project(':client:transport') // randomly swapped in as a transport
|
||||
}
|
||||
|
||||
task singleNodeIntegTest(type: RestIntegTestTask) {
|
||||
mustRunAfter(precommit)
|
||||
}
|
||||
|
||||
testClusters.singleNodeIntegTest {
|
||||
setting 'discovery.type', 'single-node'
|
||||
}
|
||||
|
||||
integTest {
|
||||
dependsOn singleNodeIntegTest
|
||||
}
|
||||
|
||||
check.dependsOn(integTest)
|
||||
|
||||
|
||||
testingConventions {
|
||||
naming.clear()
|
||||
naming {
|
||||
IT {
|
||||
baseClass 'org.opensearch.smoketest.OpenSearchSmokeClientTestCase'
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,183 +0,0 @@
|
|||
/*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* The OpenSearch Contributors require contributions made to
|
||||
* this file be licensed under the Apache-2.0 license or a
|
||||
* compatible open source license.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Modifications Copyright OpenSearch Contributors. See
|
||||
* GitHub history for details.
|
||||
*/
|
||||
|
||||
package org.opensearch.smoketest;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.opensearch.client.Client;
|
||||
import org.opensearch.client.transport.TransportClient;
|
||||
import org.opensearch.common.network.NetworkModule;
|
||||
import org.opensearch.common.settings.Settings;
|
||||
import org.opensearch.common.transport.TransportAddress;
|
||||
import org.opensearch.env.Environment;
|
||||
import org.opensearch.plugins.Plugin;
|
||||
import org.opensearch.transport.client.PreBuiltTransportClient;
|
||||
import org.opensearch.transport.nio.MockNioTransportPlugin;
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URL;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Locale;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
|
||||
/**
|
||||
* An abstract base class to run integration tests against an OpenSearch cluster running outside of the test process.
|
||||
* <p>
|
||||
* You can define a list of transport addresses from where you can reach your cluster by setting "tests.cluster" system
|
||||
* property. It defaults to "localhost:9300". If you run this from `gradle integTest` then it will start the clsuter for
|
||||
* you and set up the property.
|
||||
* <p>
|
||||
* If you want to debug this module from your IDE, then start an external cluster by yourself, maybe with `gradle run`,
|
||||
* then run JUnit. If you changed the default port, set "-Dtests.cluster=localhost:PORT" when running your test.
|
||||
*/
|
||||
@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")
|
||||
public abstract class OpenSearchSmokeClientTestCase extends LuceneTestCase {
|
||||
|
||||
/**
|
||||
* Key used to eventually switch to using an external cluster and provide its transport addresses
|
||||
*/
|
||||
public static final String TESTS_CLUSTER = "tests.cluster";
|
||||
|
||||
protected static final Logger logger = LogManager.getLogger(OpenSearchSmokeClientTestCase.class);
|
||||
|
||||
private static final AtomicInteger counter = new AtomicInteger();
|
||||
private static Client client;
|
||||
private static String clusterAddresses;
|
||||
protected String index;
|
||||
|
||||
private static Client startClient(Path tempDir, TransportAddress... transportAddresses) {
|
||||
Settings.Builder builder = Settings.builder()
|
||||
.put("node.name", "qa_smoke_client_" + counter.getAndIncrement())
|
||||
.put("client.transport.ignore_cluster_name", true)
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), tempDir);
|
||||
final Collection<Class<? extends Plugin>> plugins;
|
||||
String transportKey = MockNioTransportPlugin.MOCK_NIO_TRANSPORT_NAME;
|
||||
Class<? extends Plugin> transportPlugin = MockNioTransportPlugin.class;
|
||||
if (random().nextBoolean()) {
|
||||
builder.put(NetworkModule.TRANSPORT_TYPE_KEY, transportKey);
|
||||
plugins = Collections.singleton(transportPlugin);
|
||||
} else {
|
||||
plugins = Collections.emptyList();
|
||||
}
|
||||
TransportClient client = new PreBuiltTransportClient(builder.build(), plugins).addTransportAddresses(transportAddresses);
|
||||
|
||||
logger.info("--> OpenSearch Java TransportClient started");
|
||||
|
||||
Exception clientException = null;
|
||||
try {
|
||||
ClusterHealthResponse health = client.admin().cluster().prepareHealth().get();
|
||||
logger.info("--> connected to [{}] cluster which is running [{}] node(s).",
|
||||
health.getClusterName(), health.getNumberOfNodes());
|
||||
} catch (Exception e) {
|
||||
clientException = e;
|
||||
}
|
||||
|
||||
assumeNoException("Sounds like your cluster is not running at " + clusterAddresses, clientException);
|
||||
|
||||
return client;
|
||||
}
|
||||
|
||||
private static Client startClient() throws IOException {
|
||||
String[] stringAddresses = clusterAddresses.split(",");
|
||||
TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length];
|
||||
int i = 0;
|
||||
for (String stringAddress : stringAddresses) {
|
||||
URL url = new URL("http://" + stringAddress);
|
||||
InetAddress inetAddress = InetAddress.getByName(url.getHost());
|
||||
transportAddresses[i++] = new TransportAddress(new InetSocketAddress(inetAddress, url.getPort()));
|
||||
}
|
||||
return startClient(createTempDir(), transportAddresses);
|
||||
}
|
||||
|
||||
public static Client getClient() {
|
||||
if (client == null) {
|
||||
try {
|
||||
client = startClient();
|
||||
} catch (IOException e) {
|
||||
logger.error("can not start the client", e);
|
||||
}
|
||||
assertThat(client, notNullValue());
|
||||
}
|
||||
return client;
|
||||
}
|
||||
|
||||
@BeforeClass
|
||||
public static void initializeSettings() {
|
||||
clusterAddresses = System.getProperty(TESTS_CLUSTER);
|
||||
if (clusterAddresses == null || clusterAddresses.isEmpty()) {
|
||||
fail("Must specify " + TESTS_CLUSTER + " for smoke client test");
|
||||
}
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void stopTransportClient() {
|
||||
if (client != null) {
|
||||
client.close();
|
||||
client = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Before
|
||||
public void defineIndexName() {
|
||||
doClean();
|
||||
index = "qa-smoke-test-client-" + randomAsciiOfLength(10).toLowerCase(Locale.getDefault());
|
||||
}
|
||||
|
||||
@After
|
||||
public void cleanIndex() {
|
||||
doClean();
|
||||
}
|
||||
|
||||
private void doClean() {
|
||||
if (client != null && index != null) {
|
||||
try {
|
||||
client.admin().indices().prepareDelete(index).get();
|
||||
} catch (Exception e) {
|
||||
// We ignore this cleanup exception
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,86 +0,0 @@
|
|||
/*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* The OpenSearch Contributors require contributions made to
|
||||
* this file be licensed under the Apache-2.0 license or a
|
||||
* compatible open source license.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Modifications Copyright OpenSearch Contributors. See
|
||||
* GitHub history for details.
|
||||
*/
|
||||
|
||||
package org.opensearch.smoketest;
|
||||
|
||||
import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.opensearch.action.search.SearchResponse;
|
||||
import org.opensearch.client.Client;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.is;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
|
||||
public class SmokeTestClientIT extends OpenSearchSmokeClientTestCase {
|
||||
|
||||
/**
|
||||
* Check that we are connected to a cluster named "opensearch".
|
||||
*/
|
||||
public void testSimpleClient() {
|
||||
final Client client = getClient();
|
||||
|
||||
// START SNIPPET: java-doc-admin-cluster-health
|
||||
final ClusterHealthResponse health =
|
||||
client.admin().cluster().prepareHealth().setWaitForYellowStatus().get();
|
||||
final String clusterName = health.getClusterName();
|
||||
final int numberOfNodes = health.getNumberOfNodes();
|
||||
// END SNIPPET: java-doc-admin-cluster-health
|
||||
assertThat(
|
||||
"cluster [" + clusterName + "] should have at least 1 node",
|
||||
numberOfNodes,
|
||||
greaterThan(0));
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an index and index some docs
|
||||
*/
|
||||
public void testPutDocument() {
|
||||
final Client client = getClient();
|
||||
|
||||
// START SNIPPET: java-doc-index-doc-simple
|
||||
client.prepareIndex(index, "doc", "1") // Index, Type, Id
|
||||
.setSource("foo", "bar") // Simple document: { "foo" : "bar" }
|
||||
.get(); // Execute and wait for the result
|
||||
// END SNIPPET: java-doc-index-doc-simple
|
||||
|
||||
// START SNIPPET: java-doc-admin-indices-refresh
|
||||
// Prepare a refresh action on a given index, execute and wait for the result
|
||||
client.admin().indices().prepareRefresh(index).get();
|
||||
// END SNIPPET: java-doc-admin-indices-refresh
|
||||
|
||||
// START SNIPPET: java-doc-search-simple
|
||||
final SearchResponse searchResponse = client.prepareSearch(index).get();
|
||||
assertThat(searchResponse.getHits().getTotalHits().value, is(1L));
|
||||
// END SNIPPET: java-doc-search-simple
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@@ -430,7 +430,6 @@ public class ActionModule extends AbstractModule {

     private static final Logger logger = LogManager.getLogger(ActionModule.class);

-    private final boolean transportClient;
     private final Settings settings;
     private final IndexNameExpressionResolver indexNameExpressionResolver;
     private final IndexScopedSettings indexScopedSettings;
@@ -447,7 +446,6 @@ public class ActionModule extends AbstractModule {
     private final ThreadPool threadPool;

     public ActionModule(
-        boolean transportClient,
         Settings settings,
         IndexNameExpressionResolver indexNameExpressionResolver,
         IndexScopedSettings indexScopedSettings,
@@ -460,7 +458,6 @@ public class ActionModule extends AbstractModule {
         UsageService usageService,
         SystemIndices systemIndices
     ) {
-        this.transportClient = transportClient;
         this.settings = settings;
         this.indexNameExpressionResolver = indexNameExpressionResolver;
         this.indexScopedSettings = indexScopedSettings;
@@ -470,9 +467,7 @@ public class ActionModule extends AbstractModule {
         this.threadPool = threadPool;
         actions = setupActions(actionPlugins);
         actionFilters = setupActionFilters(actionPlugins);
-        autoCreateIndex = transportClient
-            ? null
-            : new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver, systemIndices);
+        autoCreateIndex = new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver, systemIndices);
         destructiveOperations = new DestructiveOperations(settings, clusterSettings);
         Set<RestHeaderDefinition> headers = Stream.concat(
             actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()),
@@ -496,12 +491,8 @@ public class ActionModule extends AbstractModule {
             actionPlugins.stream().flatMap(p -> p.indicesAliasesRequestValidators().stream()).collect(Collectors.toList())
         );

-        if (transportClient) {
-            restController = null;
-        } else {
         restController = new RestController(headers, restWrapper, nodeClient, circuitBreakerService, usageService);
-        }
     }

     public Map<String, ActionHandler<?, ?>> getActions() {
         return actions;
@@ -869,8 +860,7 @@ public class ActionModule extends AbstractModule {
         bind(new TypeLiteral<RequestValidators<IndicesAliasesRequest>>() {
         }).toInstance(indicesAliasesRequestRequestValidators);

-        if (false == transportClient) {
-            // Supporting classes only used when not a transport client
+        // Supporting classes
         bind(AutoCreateIndex.class).toInstance(autoCreateIndex);
         bind(TransportLivenessAction.class).asEagerSingleton();

@@ -890,7 +880,6 @@ public class ActionModule extends AbstractModule {
                 }
             }
         }
-        }
     }

     public ActionFilters getActionFilters() {
         return actionFilters;
@@ -62,7 +62,7 @@ import java.util.Map;
 import static java.util.Collections.unmodifiableMap;

 /**
- * Transport client that collects snapshot shard statuses from data nodes
+ * Transport action that collects snapshot shard statuses from data nodes
  */
 public class TransportNodesSnapshotsStatus extends TransportNodesAction<
     TransportNodesSnapshotsStatus.Request,
@@ -36,53 +36,13 @@ import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.opensearch.action.ActionListener;
 import org.opensearch.action.ActionRunnable;
-import org.opensearch.client.Client;
-import org.opensearch.client.transport.TransportClient;
-import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.concurrent.AbstractRunnable;
 import org.opensearch.threadpool.ThreadPool;

-import java.util.concurrent.Future;
-
 /**
  * An action listener that wraps another action listener and threading its execution.
  */
 public final class ThreadedActionListener<Response> implements ActionListener<Response> {
-
-    /**
-     * Wrapper that can be used to automatically wrap a listener in a threaded listener if needed.
-     */
-    public static class Wrapper {
-
-        private final Logger logger;
-        private final ThreadPool threadPool;
-
-        private final boolean threadedListener;
-
-        public Wrapper(Logger logger, Settings settings, ThreadPool threadPool) {
-            this.logger = logger;
-            this.threadPool = threadPool;
-            // Should the action listener be threaded or not by default. Action listeners are automatically threaded for
-            // the transport client in order to make sure client side code is not executed on IO threads.
-            this.threadedListener = TransportClient.CLIENT_TYPE.equals(Client.CLIENT_TYPE_SETTING_S.get(settings));
-        }
-
-        public <Response> ActionListener<Response> wrap(ActionListener<Response> listener) {
-            if (threadedListener == false) {
-                return listener;
-            }
-            // if its a future, the callback is very lightweight (flipping a bit) so no need to wrap it
-            if (listener instanceof Future) {
-                return listener;
-            }
-            // already threaded...
-            if (listener instanceof ThreadedActionListener) {
-                return listener;
-            }
-            return new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener, false);
-        }
-    }
-
     private final Logger logger;
     private final ThreadPool threadPool;
     private final String executor;
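With the Wrapper removed, code that still needs to move a listener off the calling thread constructs ThreadedActionListener directly; its five-argument constructor is visible in the removed wrap(...) method above. A hedged sketch (the helper class, the GENERIC executor choice, and the forceExecution flag value are illustrative, not part of this change):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.opensearch.action.ActionListener;
import org.opensearch.action.support.ThreadedActionListener;
import org.opensearch.threadpool.ThreadPool;

final class ThreadedListenerSketch {
    private static final Logger logger = LogManager.getLogger(ThreadedListenerSketch.class);

    // Wrap an existing listener so its callbacks run on the GENERIC thread pool instead of the calling thread.
    static <T> ActionListener<T> offload(ThreadPool threadPool, ActionListener<T> listener) {
        return new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.GENERIC, listener, false);
    }
}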
@@ -51,8 +51,7 @@ public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request>
      *
      * See {@link DiscoveryNodes#resolveNodes} for a full description of the options.
      *
-     * TODO: once we stop using the transport client as a gateway to the cluster, we can get rid of this and resolve it to concrete nodes
-     * in the rest layer
+     * TODO: get rid of this and resolve it to concrete nodes in the rest layer
      **/
     private String[] nodesIds;

@@ -90,11 +90,9 @@ import java.util.Map;
  * simply returns an {@link org.opensearch.action.ActionFuture}, while the second accepts an
  * {@link ActionListener}.
  * <p>
- * A client can either be retrieved from a {@link org.opensearch.node.Node} started, or connected remotely
- * to one or more nodes using {@link org.opensearch.client.transport.TransportClient}.
+ * A client can be retrieved from a started {@link org.opensearch.node.Node}.
  *
  * @see org.opensearch.node.Node#client()
- * @see org.opensearch.client.transport.TransportClient
 */
 public interface Client extends OpenSearchClient, Releasable {

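After this change, the remaining ways to obtain a client are the node-local Client referenced in the updated Javadoc and the REST clients that the deleted TransportClient code pointed to as its replacement. A hedged sketch of both paths; the host, port, and helper class are illustrative:

import org.apache.http.HttpHost;
import org.opensearch.client.Client;
import org.opensearch.client.RestClient;
import org.opensearch.client.RestHighLevelClient;
import org.opensearch.node.Node;

final class ClientAcquisitionSketch {
    // In-process: a client obtained from a running node, as the updated Javadoc describes.
    static Client nodeClient(Node startedNode) {
        return startedNode.client();
    }

    // Remote: the high-level REST client replaces the removed TransportClient for out-of-process access.
    static RestHighLevelClient remoteClient() {
        return new RestHighLevelClient(RestClient.builder(new HttpHost("localhost", 9200, "http")));
    }
}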
@@ -347,7 +347,6 @@ import org.opensearch.action.search.SearchScrollAction;
 import org.opensearch.action.search.SearchScrollRequest;
 import org.opensearch.action.search.SearchScrollRequestBuilder;
 import org.opensearch.action.support.PlainActionFuture;
-import org.opensearch.action.support.ThreadedActionListener;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.action.termvectors.MultiTermVectorsAction;
 import org.opensearch.action.termvectors.MultiTermVectorsRequest;
@@ -385,14 +384,12 @@ public abstract class AbstractClient implements Client {
     protected final Settings settings;
     private final ThreadPool threadPool;
     private final Admin admin;
-    private final ThreadedActionListener.Wrapper threadedWrapper;

     public AbstractClient(Settings settings, ThreadPool threadPool) {
         this.settings = settings;
         this.threadPool = threadPool;
         this.admin = new Admin(this);
         this.logger = LogManager.getLogger(this.getClass());
-        this.threadedWrapper = new ThreadedActionListener.Wrapper(logger, settings, threadPool);
     }

     @Override
@@ -429,7 +426,6 @@ public abstract class AbstractClient implements Client {
         Request request,
         ActionListener<Response> listener
     ) {
-        listener = threadedWrapper.wrap(listener);
         doExecute(action, request, listener);
     }

@ -1,504 +0,0 @@
|
|||
/*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* The OpenSearch Contributors require contributions made to
|
||||
* this file be licensed under the Apache-2.0 license or a
|
||||
* compatible open source license.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Modifications Copyright OpenSearch Contributors. See
|
||||
* GitHub history for details.
|
||||
*/
|
||||
|
||||
package org.opensearch.client.transport;
|
||||
|
||||
import org.opensearch.action.ActionListener;
|
||||
import org.opensearch.action.ActionModule;
|
||||
import org.opensearch.action.ActionRequest;
|
||||
import org.opensearch.action.ActionResponse;
|
||||
import org.opensearch.action.ActionType;
|
||||
import org.opensearch.client.support.AbstractClient;
|
||||
import org.opensearch.cluster.ClusterModule;
|
||||
import org.opensearch.cluster.node.DiscoveryNode;
|
||||
import org.opensearch.common.UUIDs;
|
||||
import org.opensearch.common.breaker.CircuitBreaker;
|
||||
import org.opensearch.common.component.LifecycleComponent;
|
||||
import org.opensearch.common.inject.Injector;
|
||||
import org.opensearch.common.inject.Module;
|
||||
import org.opensearch.common.inject.ModulesBuilder;
|
||||
import org.opensearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.opensearch.common.network.NetworkModule;
|
||||
import org.opensearch.common.network.NetworkService;
|
||||
import org.opensearch.common.settings.ClusterSettings;
|
||||
import org.opensearch.common.settings.Setting;
|
||||
import org.opensearch.common.settings.Settings;
|
||||
import org.opensearch.common.settings.SettingsModule;
|
||||
import org.opensearch.common.transport.TransportAddress;
|
||||
import org.opensearch.common.unit.TimeValue;
|
||||
import org.opensearch.common.util.BigArrays;
|
||||
import org.opensearch.common.util.PageCacheRecycler;
|
||||
import org.opensearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.opensearch.core.internal.io.IOUtils;
|
||||
import org.opensearch.indices.IndicesModule;
|
||||
import org.opensearch.indices.SystemIndices;
|
||||
import org.opensearch.indices.breaker.CircuitBreakerService;
|
||||
import org.opensearch.node.InternalSettingsPreparer;
|
||||
import org.opensearch.node.Node;
|
||||
import org.opensearch.plugins.ActionPlugin;
|
||||
import org.opensearch.plugins.NetworkPlugin;
|
||||
import org.opensearch.plugins.Plugin;
|
||||
import org.opensearch.plugins.PluginsService;
|
||||
import org.opensearch.plugins.SearchPlugin;
|
||||
import org.opensearch.search.SearchModule;
|
||||
import org.opensearch.threadpool.ExecutorBuilder;
|
||||
import org.opensearch.threadpool.ThreadPool;
|
||||
import org.opensearch.transport.Transport;
|
||||
import org.opensearch.transport.TransportService;
|
||||
import org.opensearch.transport.TransportSettings;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.emptySet;
|
||||
import static java.util.Collections.unmodifiableList;
|
||||
import static java.util.stream.Collectors.toList;
|
||||
import static org.opensearch.common.unit.TimeValue.timeValueSeconds;
|
||||
|
||||
/**
|
||||
* The transport client allows to create a client that is not part of the cluster, but simply connects to one
|
||||
* or more nodes directly by adding their respective addresses using
|
||||
* {@link #addTransportAddress(org.opensearch.common.transport.TransportAddress)}.
|
||||
* <p>
|
||||
* The transport client important modules used is the {@link org.opensearch.common.network.NetworkModule} which is
|
||||
* started in client mode (only connects, no bind).
|
||||
*
|
||||
* @deprecated {@link TransportClient} is deprecated in favour of the High Level REST client and will
|
||||
* be removed in OpenSearch 1.0.0.
|
||||
*/
|
||||
@Deprecated
|
||||
public abstract class TransportClient extends AbstractClient {
|
||||
|
||||
public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting(
|
||||
"client.transport.nodes_sampler_interval",
|
||||
timeValueSeconds(5),
|
||||
Setting.Property.NodeScope
|
||||
);
|
||||
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting(
|
||||
"client.transport.ping_timeout",
|
||||
timeValueSeconds(5),
|
||||
Setting.Property.NodeScope
|
||||
);
|
||||
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting(
|
||||
"client.transport.ignore_cluster_name",
|
||||
false,
|
||||
Setting.Property.NodeScope
|
||||
);
|
||||
public static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF = Setting.boolSetting(
|
||||
"client.transport.sniff",
|
||||
false,
|
||||
Setting.Property.NodeScope
|
||||
);
|
||||
|
||||
public static final String TRANSPORT_CLIENT_FEATURE = "transport_client";
|
||||
|
||||
private static PluginsService newPluginService(final Settings settings, Collection<Class<? extends Plugin>> plugins) {
|
||||
final Settings.Builder settingsBuilder = Settings.builder()
|
||||
.put(TransportSettings.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval
|
||||
.put(InternalSettingsPreparer.prepareSettings(settings))
|
||||
.put(NetworkService.NETWORK_SERVER.getKey(), false)
|
||||
.put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE);
|
||||
return new PluginsService(settingsBuilder.build(), null, null, null, plugins);
|
||||
}
|
||||
|
||||
protected static Collection<Class<? extends Plugin>> addPlugins(
|
||||
Collection<Class<? extends Plugin>> collection,
|
||||
Class<? extends Plugin>... plugins
|
||||
) {
|
||||
return addPlugins(collection, Arrays.asList(plugins));
|
||||
}
|
||||
|
||||
protected static Collection<Class<? extends Plugin>> addPlugins(
|
||||
Collection<Class<? extends Plugin>> collection,
|
||||
Collection<Class<? extends Plugin>> plugins
|
||||
) {
|
||||
ArrayList<Class<? extends Plugin>> list = new ArrayList<>(collection);
|
||||
for (Class<? extends Plugin> p : plugins) {
|
||||
if (list.contains(p)) {
|
||||
throw new IllegalArgumentException("plugin already exists: " + p);
|
||||
}
|
||||
list.add(p);
|
||||
}
|
||||
return list;
|
||||
}
|
||||
|
||||
private static ClientTemplate buildTemplate(
|
||||
Settings providedSettings,
|
||||
Settings defaultSettings,
|
||||
Collection<Class<? extends Plugin>> plugins,
|
||||
HostFailureListener failureListner
|
||||
) {
|
||||
if (Node.NODE_NAME_SETTING.exists(providedSettings) == false) {
|
||||
providedSettings = Settings.builder().put(providedSettings).put(Node.NODE_NAME_SETTING.getKey(), "_client_").build();
|
||||
}
|
||||
final PluginsService pluginsService = newPluginService(providedSettings, plugins);
|
||||
final List<Closeable> resourcesToClose = new ArrayList<>();
|
||||
final Settings settings = Settings.builder()
|
||||
.put(defaultSettings)
|
||||
.put(pluginsService.updatedSettings())
|
||||
.put(TransportSettings.FEATURE_PREFIX + "." + TRANSPORT_CLIENT_FEATURE, true)
|
||||
.build();
|
||||
final ThreadPool threadPool = new ThreadPool(settings);
|
||||
resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
|
||||
final NetworkService networkService = new NetworkService(emptyList());
|
||||
try {
|
||||
final List<Setting<?>> additionalSettings = new ArrayList<>(pluginsService.getPluginSettings());
|
||||
final List<String> additionalSettingsFilter = new ArrayList<>(pluginsService.getPluginSettingsFilter());
|
||||
for (final ExecutorBuilder<?> builder : threadPool.builders()) {
|
||||
additionalSettings.addAll(builder.getRegisteredSettings());
|
||||
}
|
||||
SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter, emptySet());
|
||||
|
||||
SearchModule searchModule = new SearchModule(settings, true, pluginsService.filterPlugins(SearchPlugin.class));
|
||||
IndicesModule indicesModule = new IndicesModule(emptyList());
|
||||
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
|
||||
entries.addAll(NetworkModule.getNamedWriteables());
|
||||
entries.addAll(searchModule.getNamedWriteables());
|
||||
entries.addAll(indicesModule.getNamedWriteables());
|
||||
entries.addAll(ClusterModule.getNamedWriteables());
|
||||
entries.addAll(
|
||||
pluginsService.filterPlugins(Plugin.class)
|
||||
.stream()
|
||||
.flatMap(p -> p.getNamedWriteables().stream())
|
||||
.collect(Collectors.toList())
|
||||
);
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
|
||||
NamedXContentRegistry xContentRegistry = new NamedXContentRegistry(
|
||||
Stream.of(
|
||||
searchModule.getNamedXContents().stream(),
|
||||
pluginsService.filterPlugins(Plugin.class).stream().flatMap(p -> p.getNamedXContent().stream())
|
||||
).flatMap(Function.identity()).collect(toList())
|
||||
);
|
||||
|
||||
ModulesBuilder modules = new ModulesBuilder();
|
||||
// plugin modules must be added here, before others or we can get crazy injection errors...
|
||||
for (Module pluginModule : pluginsService.createGuiceModules()) {
|
||||
modules.add(pluginModule);
|
||||
}
|
||||
modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
|
||||
ActionModule actionModule = new ActionModule(
|
||||
true,
|
||||
settings,
|
||||
null,
|
||||
settingsModule.getIndexScopedSettings(),
|
||||
settingsModule.getClusterSettings(),
|
||||
settingsModule.getSettingsFilter(),
|
||||
threadPool,
|
||||
pluginsService.filterPlugins(ActionPlugin.class),
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
new SystemIndices(emptyMap())
|
||||
);
|
||||
modules.add(actionModule);
|
||||
|
||||
CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(
|
||||
settingsModule.getSettings(),
|
||||
emptyList(),
|
||||
settingsModule.getClusterSettings()
|
||||
);
|
||||
resourcesToClose.add(circuitBreakerService);
|
||||
PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings);
|
||||
BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService, CircuitBreaker.REQUEST);
|
||||
modules.add(settingsModule);
|
||||
NetworkModule networkModule = new NetworkModule(
|
||||
settings,
|
||||
true,
|
||||
pluginsService.filterPlugins(NetworkPlugin.class),
|
||||
threadPool,
|
||||
bigArrays,
|
||||
pageCacheRecycler,
|
||||
circuitBreakerService,
|
||||
namedWriteableRegistry,
|
||||
xContentRegistry,
|
||||
networkService,
|
||||
null,
|
||||
new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
|
||||
);
|
||||
final Transport transport = networkModule.getTransportSupplier().get();
|
||||
final TransportService transportService = new TransportService(
|
||||
settings,
|
||||
transport,
|
||||
threadPool,
|
||||
networkModule.getTransportInterceptor(),
|
||||
boundTransportAddress -> DiscoveryNode.createLocal(
|
||||
settings,
|
||||
new TransportAddress(TransportAddress.META_ADDRESS, 0),
|
||||
UUIDs.randomBase64UUID()
|
||||
),
|
||||
null,
|
||||
emptySet()
|
||||
);
|
||||
modules.add((b -> {
|
||||
b.bind(BigArrays.class).toInstance(bigArrays);
|
||||
b.bind(PageCacheRecycler.class).toInstance(pageCacheRecycler);
|
||||
b.bind(PluginsService.class).toInstance(pluginsService);
|
||||
b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService);
|
||||
b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
|
||||
b.bind(Transport.class).toInstance(transport);
|
||||
b.bind(TransportService.class).toInstance(transportService);
|
||||
b.bind(NetworkService.class).toInstance(networkService);
|
||||
}));
|
||||
|
||||
Injector injector = modules.createInjector();
|
||||
final TransportClientNodesService nodesService = new TransportClientNodesService(
|
||||
settings,
|
||||
transportService,
|
||||
threadPool,
|
||||
failureListner == null ? (t, e) -> {} : failureListner
|
||||
);
|
||||
|
||||
// construct the list of client actions
|
||||
final List<ActionPlugin> actionPlugins = pluginsService.filterPlugins(ActionPlugin.class);
|
||||
final List<ActionType> clientActions = actionPlugins.stream()
|
||||
.flatMap(p -> p.getClientActions().stream())
|
||||
.collect(Collectors.toList());
|
||||
// add all the base actions
|
||||
final List<? extends ActionType<?>> baseActions = actionModule.getActions()
|
||||
.values()
|
||||
.stream()
|
||||
.map(ActionPlugin.ActionHandler::getAction)
|
||||
.collect(Collectors.toList());
|
||||
clientActions.addAll(baseActions);
|
||||
final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService, clientActions);
|
||||
|
||||
List<LifecycleComponent> pluginLifecycleComponents = new ArrayList<>(
|
||||
pluginsService.getGuiceServiceClasses().stream().map(injector::getInstance).collect(Collectors.toList())
|
||||
);
|
||||
resourcesToClose.addAll(pluginLifecycleComponents);
|
||||
|
||||
transportService.start();
|
||||
transportService.acceptIncomingRequests();
|
||||
|
||||
ClientTemplate transportClient = new ClientTemplate(
|
||||
injector,
|
||||
pluginLifecycleComponents,
|
||||
nodesService,
|
||||
proxy,
|
||||
namedWriteableRegistry
|
||||
);
|
||||
resourcesToClose.clear();
|
||||
return transportClient;
|
||||
} finally {
|
||||
IOUtils.closeWhileHandlingException(resourcesToClose);
|
||||
}
|
||||
}
|
||||
|
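For code outside a node that still needs a NamedWriteableRegistry after this removal (test fixtures, for example), the sketch below shows the same assembly as buildTemplate() above without the transport client. It is an illustrative reconstruction, not part of the commit; the class name is made up, the plugin-provided entries are omitted, and it assumes the two-argument SearchModule constructor introduced elsewhere in this change.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.opensearch.cluster.ClusterModule;
import org.opensearch.common.io.stream.NamedWriteableRegistry;
import org.opensearch.common.network.NetworkModule;
import org.opensearch.common.settings.Settings;
import org.opensearch.indices.IndicesModule;
import org.opensearch.search.SearchModule;

public final class RegistryExample {
    public static NamedWriteableRegistry buildRegistry() {
        // Same entry sources as buildTemplate() above, minus plugin-provided entries.
        SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
        IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
        List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
        entries.addAll(NetworkModule.getNamedWriteables());
        entries.addAll(searchModule.getNamedWriteables());
        entries.addAll(indicesModule.getNamedWriteables());
        entries.addAll(ClusterModule.getNamedWriteables());
        return new NamedWriteableRegistry(entries);
    }
}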
||||
private static final class ClientTemplate {
|
||||
final Injector injector;
|
||||
private final List<LifecycleComponent> pluginLifecycleComponents;
|
||||
private final TransportClientNodesService nodesService;
|
||||
private final TransportProxyClient proxy;
|
||||
private final NamedWriteableRegistry namedWriteableRegistry;
|
||||
|
||||
private ClientTemplate(
|
||||
Injector injector,
|
||||
List<LifecycleComponent> pluginLifecycleComponents,
|
||||
TransportClientNodesService nodesService,
|
||||
TransportProxyClient proxy,
|
||||
NamedWriteableRegistry namedWriteableRegistry
|
||||
) {
|
||||
this.injector = injector;
|
||||
this.pluginLifecycleComponents = pluginLifecycleComponents;
|
||||
this.nodesService = nodesService;
|
||||
this.proxy = proxy;
|
||||
this.namedWriteableRegistry = namedWriteableRegistry;
|
||||
}
|
||||
|
||||
Settings getSettings() {
|
||||
return injector.getInstance(Settings.class);
|
||||
}
|
||||
|
||||
ThreadPool getThreadPool() {
|
||||
return injector.getInstance(ThreadPool.class);
|
||||
}
|
||||
}
|
||||
|
||||
public static final String CLIENT_TYPE = "transport";
|
||||
|
||||
final Injector injector;
|
||||
protected final NamedWriteableRegistry namedWriteableRegistry;
|
||||
|
||||
private final List<LifecycleComponent> pluginLifecycleComponents;
|
||||
private final TransportClientNodesService nodesService;
|
||||
private final TransportProxyClient proxy;
|
||||
|
||||
/**
|
||||
* Creates a new TransportClient with the given settings and plugins
|
||||
*/
|
||||
public TransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
|
||||
this(buildTemplate(settings, Settings.EMPTY, plugins, null));
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new TransportClient with the given settings, defaults and plugins.
|
||||
* @param settings the client settings
|
||||
* @param defaultSettings default settings that are merged after the plugins have added their additional settings.
|
||||
* @param plugins the client plugins
|
||||
*/
|
||||
protected TransportClient(
|
||||
Settings settings,
|
||||
Settings defaultSettings,
|
||||
Collection<Class<? extends Plugin>> plugins,
|
||||
HostFailureListener hostFailureListener
|
||||
) {
|
||||
this(buildTemplate(settings, defaultSettings, plugins, hostFailureListener));
|
||||
}
|
||||
|
||||
private TransportClient(ClientTemplate template) {
|
||||
super(template.getSettings(), template.getThreadPool());
|
||||
this.injector = template.injector;
|
||||
this.pluginLifecycleComponents = unmodifiableList(template.pluginLifecycleComponents);
|
||||
this.nodesService = template.nodesService;
|
||||
this.proxy = template.proxy;
|
||||
this.namedWriteableRegistry = template.namedWriteableRegistry;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the currently registered transport addresses to use (added using
|
||||
* {@link #addTransportAddress(org.opensearch.common.transport.TransportAddress)}).
|
||||
*/
|
||||
public List<TransportAddress> transportAddresses() {
|
||||
return nodesService.transportAddresses();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the current connected transport nodes that this client will use.
|
||||
* <p>
|
||||
* The nodes include all the nodes that are currently alive based on the transport
|
||||
* addresses provided.
|
||||
*/
|
||||
public List<DiscoveryNode> connectedNodes() {
|
||||
return nodesService.connectedNodes();
|
||||
}
|
||||
|
||||
/**
|
||||
* The list of filtered nodes that were not connected to, for example, due to
|
||||
* a mismatch in cluster name.
|
||||
*/
|
||||
public List<DiscoveryNode> filteredNodes() {
|
||||
return nodesService.filteredNodes();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the listed nodes in the transport client (ones added to it).
|
||||
*/
|
||||
public List<DiscoveryNode> listedNodes() {
|
||||
return nodesService.listedNodes();
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a transport address that will be used to connect to the cluster.
|
||||
* <p>
|
||||
* The Node this transport address represents will be used if it's possible to connect to it.
|
||||
* If it is unavailable, it will be automatically connected to once it is up.
|
||||
* <p>
|
||||
* In order to get the list of all the current connected nodes, please see {@link #connectedNodes()}.
|
||||
*/
|
||||
public TransportClient addTransportAddress(TransportAddress transportAddress) {
|
||||
nodesService.addTransportAddresses(transportAddress);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a list of transport addresses that will be used to connect to the cluster.
|
||||
* <p>
|
||||
* The Node this transport address represents will be used if it's possible to connect to it.
|
||||
* If it is unavailable, it will be automatically connected to once it is up.
|
||||
* <p>
|
||||
* In order to get the list of all the current connected nodes, please see {@link #connectedNodes()}.
|
||||
*/
|
||||
public TransportClient addTransportAddresses(TransportAddress... transportAddress) {
|
||||
nodesService.addTransportAddresses(transportAddress);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes a transport address from the list of transport addresses that are used to connect to the cluster.
|
||||
*/
|
||||
public TransportClient removeTransportAddress(TransportAddress transportAddress) {
|
||||
nodesService.removeTransportAddress(transportAddress);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Closes the client.
|
||||
*/
|
||||
@Override
|
||||
public void close() {
|
||||
List<Closeable> closeables = new ArrayList<>();
|
||||
closeables.add(nodesService);
|
||||
closeables.add(injector.getInstance(TransportService.class));
|
||||
|
||||
for (LifecycleComponent plugin : pluginLifecycleComponents) {
|
||||
closeables.add(plugin);
|
||||
}
|
||||
closeables.add(() -> ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS));
|
||||
IOUtils.closeWhileHandlingException(closeables);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
|
||||
ActionType<Response> action,
|
||||
Request request,
|
||||
ActionListener<Response> listener
|
||||
) {
|
||||
proxy.execute(action, request, listener);
|
||||
}
|
||||
|
||||
/**
|
||||
* Listener that allows a caller to be notified whenever a node failure or disconnect happens
|
||||
*/
|
||||
@FunctionalInterface
|
||||
public interface HostFailureListener {
|
||||
/**
|
||||
* Called once a node disconnect is detected.
|
||||
* @param node the node that has been disconnected
|
||||
* @param ex the exception causing the disconnection
|
||||
*/
|
||||
void onNodeDisconnected(DiscoveryNode node, Exception ex);
|
||||
}
|
||||
|
||||
// pkg private for testing
|
||||
TransportClientNodesService getNodesService() {
|
||||
return nodesService;
|
||||
}
|
||||
}
|
|
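Since the class above is deleted by this commit, a hedged migration sketch may help downstream readers. The "before" half is commented out because the classes no longer exist; PreBuiltTransportClient (from the also-removed :client:transport module), the cluster name, and the port numbers are illustrative assumptions, while the REST builder call is the real low-level REST client API.

// Before (removed in this commit): a transport client joined over the transport port (9300 by default).
// Settings settings = Settings.builder().put("cluster.name", "my-cluster").build();
// TransportClient client = new PreBuiltTransportClient(settings)
//     .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300));

// After: talk HTTP to the cluster with the low-level REST client instead.
import org.apache.http.HttpHost;
import org.opensearch.client.Request;
import org.opensearch.client.Response;
import org.opensearch.client.RestClient;

public final class RestMigrationExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Response response = client.performRequest(new Request("GET", "/"));
            System.out.println(response.getStatusLine());
        }
    }
}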
@@ -1,640 +0,0 @@
|
|||
/*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* The OpenSearch Contributors require contributions made to
|
||||
* this file be licensed under the Apache-2.0 license or a
|
||||
* compatible open source license.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Modifications Copyright OpenSearch Contributors. See
|
||||
* GitHub history for details.
|
||||
*/
|
||||
|
||||
package org.opensearch.client.transport;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.opensearch.common.io.stream.StreamInput;
|
||||
import org.opensearch.core.internal.io.IOUtils;
|
||||
import org.opensearch.ExceptionsHelper;
|
||||
import org.opensearch.Version;
|
||||
import org.opensearch.action.ActionListener;
|
||||
import org.opensearch.action.admin.cluster.node.liveness.LivenessRequest;
|
||||
import org.opensearch.action.admin.cluster.node.liveness.LivenessResponse;
|
||||
import org.opensearch.action.admin.cluster.node.liveness.TransportLivenessAction;
|
||||
import org.opensearch.action.admin.cluster.state.ClusterStateAction;
|
||||
import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
|
||||
import org.opensearch.client.Requests;
|
||||
import org.opensearch.cluster.ClusterName;
|
||||
import org.opensearch.cluster.node.DiscoveryNode;
|
||||
import org.opensearch.common.Randomness;
|
||||
import org.opensearch.common.settings.Settings;
|
||||
import org.opensearch.common.transport.TransportAddress;
|
||||
import org.opensearch.common.unit.TimeValue;
|
||||
import org.opensearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.opensearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.opensearch.threadpool.Scheduler;
|
||||
import org.opensearch.threadpool.ThreadPool;
|
||||
import org.opensearch.transport.ConnectTransportException;
|
||||
import org.opensearch.transport.ConnectionProfile;
|
||||
import org.opensearch.transport.FutureTransportResponseHandler;
|
||||
import org.opensearch.transport.NodeDisconnectedException;
|
||||
import org.opensearch.transport.NodeNotConnectedException;
|
||||
import org.opensearch.transport.PlainTransportFuture;
|
||||
import org.opensearch.transport.Transport;
|
||||
import org.opensearch.transport.TransportException;
|
||||
import org.opensearch.transport.TransportRequestOptions;
|
||||
import org.opensearch.transport.TransportResponseHandler;
|
||||
import org.opensearch.transport.TransportService;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
final class TransportClientNodesService implements Closeable {
|
||||
|
||||
private static final Logger logger = LogManager.getLogger(TransportClientNodesService.class);
|
||||
|
||||
private final TimeValue nodesSamplerInterval;
|
||||
|
||||
private final long pingTimeout;
|
||||
|
||||
private final ClusterName clusterName;
|
||||
|
||||
private final TransportService transportService;
|
||||
|
||||
private final ThreadPool threadPool;
|
||||
|
||||
private final Version minCompatibilityVersion;
|
||||
|
||||
// nodes that are added to be discovered
|
||||
private volatile List<DiscoveryNode> listedNodes = Collections.emptyList();
|
||||
|
||||
private final Object mutex = new Object();
|
||||
|
||||
private volatile List<DiscoveryNode> nodes = Collections.emptyList();
|
||||
// Filtered nodes are nodes whose cluster name does not match the configured cluster name
|
||||
private volatile List<DiscoveryNode> filteredNodes = Collections.emptyList();
|
||||
|
||||
private final AtomicInteger tempNodeIdGenerator = new AtomicInteger();
|
||||
|
||||
private final NodeSampler nodesSampler;
|
||||
|
||||
private volatile Scheduler.Cancellable nodesSamplerCancellable;
|
||||
|
||||
private final AtomicInteger randomNodeGenerator = new AtomicInteger(Randomness.get().nextInt());
|
||||
|
||||
private final boolean ignoreClusterName;
|
||||
|
||||
private volatile boolean closed;
|
||||
|
||||
private final TransportClient.HostFailureListener hostFailureListener;
|
||||
|
||||
// TODO: migrate this to use low level connections and single type channels
|
||||
/** {@link ConnectionProfile} to use when connecting to the listed nodes and doing a liveness check */
|
||||
private static final ConnectionProfile LISTED_NODES_PROFILE;
|
||||
|
||||
static {
|
||||
ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
|
||||
builder.addConnections(
|
||||
1,
|
||||
TransportRequestOptions.Type.BULK,
|
||||
TransportRequestOptions.Type.PING,
|
||||
TransportRequestOptions.Type.RECOVERY,
|
||||
TransportRequestOptions.Type.REG,
|
||||
TransportRequestOptions.Type.STATE
|
||||
);
|
||||
LISTED_NODES_PROFILE = builder.build();
|
||||
}
|
||||
|
||||
TransportClientNodesService(
|
||||
Settings settings,
|
||||
TransportService transportService,
|
||||
ThreadPool threadPool,
|
||||
TransportClient.HostFailureListener hostFailureListener
|
||||
) {
|
||||
this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
|
||||
this.transportService = transportService;
|
||||
this.threadPool = threadPool;
|
||||
this.minCompatibilityVersion = Version.CURRENT.minimumCompatibilityVersion();
|
||||
|
||||
this.nodesSamplerInterval = TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(settings);
|
||||
this.pingTimeout = TransportClient.CLIENT_TRANSPORT_PING_TIMEOUT.get(settings).millis();
|
||||
this.ignoreClusterName = TransportClient.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(settings);
|
||||
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("node_sampler_interval[{}]", nodesSamplerInterval);
|
||||
}
|
||||
|
||||
if (TransportClient.CLIENT_TRANSPORT_SNIFF.get(settings)) {
|
||||
this.nodesSampler = new SniffNodesSampler();
|
||||
} else {
|
||||
this.nodesSampler = new SimpleNodeSampler();
|
||||
}
|
||||
this.hostFailureListener = hostFailureListener;
|
||||
this.nodesSamplerCancellable = threadPool.schedule(new ScheduledNodeSampler(), nodesSamplerInterval, ThreadPool.Names.GENERIC);
|
||||
}
|
||||
|
||||
public List<TransportAddress> transportAddresses() {
|
||||
List<TransportAddress> lstBuilder = new ArrayList<>();
|
||||
for (DiscoveryNode listedNode : listedNodes) {
|
||||
lstBuilder.add(listedNode.getAddress());
|
||||
}
|
||||
return Collections.unmodifiableList(lstBuilder);
|
||||
}
|
||||
|
||||
public List<DiscoveryNode> connectedNodes() {
|
||||
return this.nodes;
|
||||
}
|
||||
|
||||
public List<DiscoveryNode> filteredNodes() {
|
||||
return this.filteredNodes;
|
||||
}
|
||||
|
||||
public List<DiscoveryNode> listedNodes() {
|
||||
return this.listedNodes;
|
||||
}
|
||||
|
||||
public TransportClientNodesService addTransportAddresses(TransportAddress... transportAddresses) {
|
||||
synchronized (mutex) {
|
||||
if (closed) {
|
||||
throw new IllegalStateException("transport client is closed, can't add an address");
|
||||
}
|
||||
List<TransportAddress> filtered = new ArrayList<>(transportAddresses.length);
|
||||
for (TransportAddress transportAddress : transportAddresses) {
|
||||
boolean found = false;
|
||||
for (DiscoveryNode otherNode : listedNodes) {
|
||||
if (otherNode.getAddress().equals(transportAddress)) {
|
||||
found = true;
|
||||
logger.debug("address [{}] already exists with [{}], ignoring...", transportAddress, otherNode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
filtered.add(transportAddress);
|
||||
}
|
||||
}
|
||||
if (filtered.isEmpty()) {
|
||||
return this;
|
||||
}
|
||||
List<DiscoveryNode> builder = new ArrayList<>(listedNodes);
|
||||
for (TransportAddress transportAddress : filtered) {
|
||||
DiscoveryNode node = new DiscoveryNode(
|
||||
"#transport#-" + tempNodeIdGenerator.incrementAndGet(),
|
||||
transportAddress,
|
||||
Collections.emptyMap(),
|
||||
Collections.emptySet(),
|
||||
minCompatibilityVersion
|
||||
);
|
||||
logger.debug("adding address [{}]", node);
|
||||
builder.add(node);
|
||||
}
|
||||
listedNodes = Collections.unmodifiableList(builder);
|
||||
nodesSampler.sample();
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
public TransportClientNodesService removeTransportAddress(TransportAddress transportAddress) {
|
||||
synchronized (mutex) {
|
||||
if (closed) {
|
||||
throw new IllegalStateException("transport client is closed, can't remove an address");
|
||||
}
|
||||
List<DiscoveryNode> listNodesBuilder = new ArrayList<>();
|
||||
for (DiscoveryNode otherNode : listedNodes) {
|
||||
if (!otherNode.getAddress().equals(transportAddress)) {
|
||||
listNodesBuilder.add(otherNode);
|
||||
} else {
|
||||
logger.debug("removing address [{}] from listed nodes", otherNode);
|
||||
}
|
||||
}
|
||||
listedNodes = Collections.unmodifiableList(listNodesBuilder);
|
||||
List<DiscoveryNode> nodesBuilder = new ArrayList<>();
|
||||
for (DiscoveryNode otherNode : nodes) {
|
||||
if (!otherNode.getAddress().equals(transportAddress)) {
|
||||
nodesBuilder.add(otherNode);
|
||||
} else {
|
||||
logger.debug("disconnecting from node with address [{}]", otherNode);
|
||||
transportService.disconnectFromNode(otherNode);
|
||||
}
|
||||
}
|
||||
nodes = Collections.unmodifiableList(nodesBuilder);
|
||||
nodesSampler.sample();
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
public <Response> void execute(NodeListenerCallback<Response> callback, ActionListener<Response> listener) {
|
||||
// we first read nodes before checking the closed state; this
|
||||
// is because otherwise we could be subject to a race where we
|
||||
// read the state as not being closed, and then the client is
|
||||
// closed and the nodes list is cleared, and then a
|
||||
// NoNodeAvailableException is thrown
|
||||
// it is important that the order of first setting the state of
|
||||
// closed and then clearing the list of nodes is maintained in
|
||||
// the close method
|
||||
final List<DiscoveryNode> nodes = this.nodes;
|
||||
if (closed) {
|
||||
throw new IllegalStateException("transport client is closed");
|
||||
}
|
||||
ensureNodesAreAvailable(nodes);
|
||||
int index = getNodeNumber();
|
||||
RetryListener<Response> retryListener = new RetryListener<>(callback, listener, nodes, index, hostFailureListener);
|
||||
DiscoveryNode node = retryListener.getNode(0);
|
||||
try {
|
||||
callback.doWithNode(node, retryListener);
|
||||
} catch (Exception e) {
|
||||
try {
|
||||
// this exception can't come from the TransportService as it doesn't throw exception at all
|
||||
listener.onFailure(e);
|
||||
} finally {
|
||||
retryListener.maybeNodeFailed(node, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static class RetryListener<Response> implements ActionListener<Response> {
|
||||
private final NodeListenerCallback<Response> callback;
|
||||
private final ActionListener<Response> listener;
|
||||
private final List<DiscoveryNode> nodes;
|
||||
private final int index;
|
||||
private final TransportClient.HostFailureListener hostFailureListener;
|
||||
|
||||
private volatile int i;
|
||||
|
||||
RetryListener(
|
||||
NodeListenerCallback<Response> callback,
|
||||
ActionListener<Response> listener,
|
||||
List<DiscoveryNode> nodes,
|
||||
int index,
|
||||
TransportClient.HostFailureListener hostFailureListener
|
||||
) {
|
||||
this.callback = callback;
|
||||
this.listener = listener;
|
||||
this.nodes = nodes;
|
||||
this.index = index;
|
||||
this.hostFailureListener = hostFailureListener;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onResponse(Response response) {
|
||||
listener.onResponse(response);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
Throwable throwable = ExceptionsHelper.unwrapCause(e);
|
||||
if (throwable instanceof ConnectTransportException) {
|
||||
maybeNodeFailed(getNode(this.i), (ConnectTransportException) throwable);
|
||||
int i = ++this.i;
|
||||
if (i >= nodes.size()) {
|
||||
listener.onFailure(new NoNodeAvailableException("None of the configured nodes were available: " + nodes, e));
|
||||
} else {
|
||||
try {
|
||||
callback.doWithNode(getNode(i), this);
|
||||
} catch (final Exception inner) {
|
||||
inner.addSuppressed(e);
|
||||
// this exception can't come from the TransportService as it doesn't throw exceptions at all
|
||||
listener.onFailure(inner);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
final DiscoveryNode getNode(int i) {
|
||||
return nodes.get((index + i) % nodes.size());
|
||||
}
|
||||
|
||||
final void maybeNodeFailed(DiscoveryNode node, Exception ex) {
|
||||
if (ex instanceof NodeDisconnectedException || ex instanceof NodeNotConnectedException) {
|
||||
hostFailureListener.onNodeDisconnected(node, ex);
|
||||
}
|
||||
}
|
||||
}
|
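A small worked example of the rotation implemented by getNode above, with illustrative numbers only: each request starts at the offset handed in by execute() and wraps around the node list, so retries visit every node once before a NoNodeAvailableException is raised.

public final class RetryOrderExample {
    public static void main(String[] args) {
        int index = 7;  // assumed starting offset from the round-robin counter
        int size = 3;   // assumed number of connected nodes
        for (int i = 0; i < size; i++) {
            // prints attempts against nodes 1, 2, 0 - the same order RetryListener.getNode(i) would use
            System.out.println("attempt " + i + " -> node " + ((index + i) % size));
        }
    }
}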
||||
|
||||
@Override
|
||||
public void close() {
|
||||
synchronized (mutex) {
|
||||
if (closed) {
|
||||
return;
|
||||
}
|
||||
closed = true;
|
||||
if (nodesSamplerCancellable != null) {
|
||||
nodesSamplerCancellable.cancel();
|
||||
}
|
||||
for (DiscoveryNode node : nodes) {
|
||||
transportService.disconnectFromNode(node);
|
||||
}
|
||||
for (DiscoveryNode listedNode : listedNodes) {
|
||||
transportService.disconnectFromNode(listedNode);
|
||||
}
|
||||
nodes = Collections.emptyList();
|
||||
}
|
||||
}
|
||||
|
||||
private int getNodeNumber() {
|
||||
int index = randomNodeGenerator.incrementAndGet();
|
||||
if (index < 0) {
|
||||
index = 0;
|
||||
randomNodeGenerator.set(0);
|
||||
}
|
||||
return index;
|
||||
}
|
||||
|
||||
private void ensureNodesAreAvailable(List<DiscoveryNode> nodes) {
|
||||
if (nodes.isEmpty()) {
|
||||
String message = String.format(Locale.ROOT, "None of the configured nodes are available: %s", this.listedNodes);
|
||||
throw new NoNodeAvailableException(message);
|
||||
}
|
||||
}
|
||||
|
||||
abstract class NodeSampler {
|
||||
public void sample() {
|
||||
synchronized (mutex) {
|
||||
if (closed) {
|
||||
return;
|
||||
}
|
||||
doSample();
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void doSample();
|
||||
|
||||
/**
|
||||
* Establishes connections to the given nodes. Any node that cannot be connected to is removed from the set,
|
||||
* so the returned list only contains nodes that are actually connected.
|
||||
*/
|
||||
List<DiscoveryNode> establishNodeConnections(Set<DiscoveryNode> nodes) {
|
||||
for (Iterator<DiscoveryNode> it = nodes.iterator(); it.hasNext();) {
|
||||
DiscoveryNode node = it.next();
|
||||
if (!transportService.nodeConnected(node)) {
|
||||
try {
|
||||
logger.trace("connecting to node [{}]", node);
|
||||
transportService.connectToNode(node);
|
||||
} catch (Exception e) {
|
||||
it.remove();
|
||||
logger.debug(() -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Collections.unmodifiableList(new ArrayList<>(nodes));
|
||||
}
|
||||
}
|
||||
|
||||
class ScheduledNodeSampler implements Runnable {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
nodesSampler.sample();
|
||||
if (!closed) {
|
||||
nodesSamplerCancellable = threadPool.schedule(this, nodesSamplerInterval, ThreadPool.Names.GENERIC);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.warn("failed to sample", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class SimpleNodeSampler extends NodeSampler {
|
||||
|
||||
@Override
|
||||
protected void doSample() {
|
||||
HashSet<DiscoveryNode> newNodes = new HashSet<>();
|
||||
ArrayList<DiscoveryNode> newFilteredNodes = new ArrayList<>();
|
||||
for (DiscoveryNode listedNode : listedNodes) {
|
||||
try (Transport.Connection connection = transportService.openConnection(listedNode, LISTED_NODES_PROFILE)) {
|
||||
final PlainTransportFuture<LivenessResponse> handler = new PlainTransportFuture<>(
|
||||
new FutureTransportResponseHandler<LivenessResponse>() {
|
||||
@Override
|
||||
public LivenessResponse read(StreamInput in) throws IOException {
|
||||
return new LivenessResponse(in);
|
||||
}
|
||||
}
|
||||
);
|
||||
transportService.sendRequest(
|
||||
connection,
|
||||
TransportLivenessAction.NAME,
|
||||
new LivenessRequest(),
|
||||
TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE).withTimeout(pingTimeout).build(),
|
||||
handler
|
||||
);
|
||||
final LivenessResponse livenessResponse = handler.txGet();
|
||||
if (!ignoreClusterName && !clusterName.equals(livenessResponse.getClusterName())) {
|
||||
logger.warn("node {} not part of the cluster {}, ignoring...", listedNode, clusterName);
|
||||
newFilteredNodes.add(listedNode);
|
||||
} else {
|
||||
// use discovered information but do keep the original transport address,
|
||||
// so people can control exactly which address is used.
|
||||
DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode();
|
||||
newNodes.add(
|
||||
new DiscoveryNode(
|
||||
nodeWithInfo.getName(),
|
||||
nodeWithInfo.getId(),
|
||||
nodeWithInfo.getEphemeralId(),
|
||||
nodeWithInfo.getHostName(),
|
||||
nodeWithInfo.getHostAddress(),
|
||||
listedNode.getAddress(),
|
||||
nodeWithInfo.getAttributes(),
|
||||
nodeWithInfo.getRoles(),
|
||||
nodeWithInfo.getVersion()
|
||||
)
|
||||
);
|
||||
}
|
||||
} catch (ConnectTransportException e) {
|
||||
logger.debug(() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
|
||||
hostFailureListener.onNodeDisconnected(listedNode, e);
|
||||
} catch (Exception e) {
|
||||
logger.info(() -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
|
||||
}
|
||||
}
|
||||
|
||||
nodes = establishNodeConnections(newNodes);
|
||||
filteredNodes = Collections.unmodifiableList(newFilteredNodes);
|
||||
}
|
||||
}
|
||||
|
||||
class SniffNodesSampler extends NodeSampler {
|
||||
|
||||
@Override
|
||||
protected void doSample() {
|
||||
// the nodes we are going to ping include the core listed nodes that were added
|
||||
// and the last round of discovered nodes
|
||||
Set<DiscoveryNode> nodesToPing = new HashSet<>();
|
||||
for (DiscoveryNode node : listedNodes) {
|
||||
nodesToPing.add(node);
|
||||
}
|
||||
for (DiscoveryNode node : nodes) {
|
||||
nodesToPing.add(node);
|
||||
}
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(nodesToPing.size());
|
||||
final ConcurrentMap<DiscoveryNode, ClusterStateResponse> clusterStateResponses = ConcurrentCollections.newConcurrentMap();
|
||||
try {
|
||||
for (final DiscoveryNode nodeToPing : nodesToPing) {
|
||||
threadPool.executor(ThreadPool.Names.MANAGEMENT).execute(new AbstractRunnable() {
|
||||
|
||||
/**
|
||||
* we try to reuse existing connections but if needed we will open a temporary connection
|
||||
* that will be closed at the end of the execution.
|
||||
*/
|
||||
Transport.Connection connectionToClose = null;
|
||||
|
||||
void onDone() {
|
||||
try {
|
||||
IOUtils.closeWhileHandlingException(connectionToClose);
|
||||
} finally {
|
||||
latch.countDown();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
onDone();
|
||||
if (e instanceof ConnectTransportException) {
|
||||
logger.debug(() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", nodeToPing), e);
|
||||
hostFailureListener.onNodeDisconnected(nodeToPing, e);
|
||||
} else {
|
||||
logger.info(
|
||||
() -> new ParameterizedMessage(
|
||||
"failed to get local cluster state info for {}, disconnecting...",
|
||||
nodeToPing
|
||||
),
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
Transport.Connection pingConnection = null;
|
||||
if (nodes.contains(nodeToPing)) {
|
||||
try {
|
||||
pingConnection = transportService.getConnection(nodeToPing);
|
||||
} catch (NodeNotConnectedException e) {
|
||||
// will use a temp connection
|
||||
}
|
||||
}
|
||||
if (pingConnection == null) {
|
||||
logger.trace("connecting to cluster node [{}]", nodeToPing);
|
||||
connectionToClose = transportService.openConnection(nodeToPing, LISTED_NODES_PROFILE);
|
||||
pingConnection = connectionToClose;
|
||||
}
|
||||
transportService.sendRequest(
|
||||
pingConnection,
|
||||
ClusterStateAction.NAME,
|
||||
Requests.clusterStateRequest().clear().nodes(true).local(true),
|
||||
TransportRequestOptions.builder()
|
||||
.withType(TransportRequestOptions.Type.STATE)
|
||||
.withTimeout(pingTimeout)
|
||||
.build(),
|
||||
new TransportResponseHandler<ClusterStateResponse>() {
|
||||
|
||||
@Override
|
||||
public ClusterStateResponse read(StreamInput in) throws IOException {
|
||||
return new ClusterStateResponse(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String executor() {
|
||||
return ThreadPool.Names.SAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleResponse(ClusterStateResponse response) {
|
||||
clusterStateResponses.put(nodeToPing, response);
|
||||
onDone();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleException(TransportException e) {
|
||||
logger.info(
|
||||
() -> new ParameterizedMessage(
|
||||
"failed to get local cluster state for {}, disconnecting...",
|
||||
nodeToPing
|
||||
),
|
||||
e
|
||||
);
|
||||
try {
|
||||
hostFailureListener.onNodeDisconnected(nodeToPing, e);
|
||||
} finally {
|
||||
onDone();
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
latch.await();
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
return;
|
||||
}
|
||||
|
||||
HashSet<DiscoveryNode> newNodes = new HashSet<>();
|
||||
HashSet<DiscoveryNode> newFilteredNodes = new HashSet<>();
|
||||
for (Map.Entry<DiscoveryNode, ClusterStateResponse> entry : clusterStateResponses.entrySet()) {
|
||||
if (!ignoreClusterName && !clusterName.equals(entry.getValue().getClusterName())) {
|
||||
logger.warn(
|
||||
"node {} not part of the cluster {}, ignoring...",
|
||||
entry.getValue().getState().nodes().getLocalNode(),
|
||||
clusterName
|
||||
);
|
||||
newFilteredNodes.add(entry.getKey());
|
||||
continue;
|
||||
}
|
||||
for (ObjectCursor<DiscoveryNode> cursor : entry.getValue().getState().nodes().getDataNodes().values()) {
|
||||
newNodes.add(cursor.value);
|
||||
}
|
||||
}
|
||||
|
||||
nodes = establishNodeConnections(newNodes);
|
||||
filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes));
|
||||
}
|
||||
}
|
||||
|
||||
public interface NodeListenerCallback<Response> {
|
||||
|
||||
void doWithNode(DiscoveryNode node, ActionListener<Response> listener);
|
||||
}
|
||||
|
||||
// pkg private for testing
|
||||
void doSample() {
|
||||
nodesSampler.doSample();
|
||||
}
|
||||
}
|
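For reference, these are the client-side knobs the sampler above honored. The sketch is illustrative only: the setting keys and defaults are taken from the TransportClient constants shown earlier in this diff, the class name is made up, and the sniff value is flipped to true purely to show how the SniffNodesSampler was selected.

import org.opensearch.common.settings.Settings;

public final class SamplerSettingsExample {
    public static Settings samplerSettings() {
        return Settings.builder()
            .put("client.transport.sniff", true)                   // default false: SimpleNodeSampler; true: SniffNodesSampler
            .put("client.transport.ping_timeout", "5s")            // liveness request timeout (default 5s)
            .put("client.transport.nodes_sampler_interval", "5s")  // how often the sampler re-runs (default 5s)
            .put("client.transport.ignore_cluster_name", false)    // default false: filter out nodes from other clusters
            .build();
    }
}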
|
@@ -1,81 +0,0 @@
|
|||
/*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* The OpenSearch Contributors require contributions made to
|
||||
* this file be licensed under the Apache-2.0 license or a
|
||||
* compatible open source license.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Modifications Copyright OpenSearch Contributors. See
|
||||
* GitHub history for details.
|
||||
*/
|
||||
|
||||
package org.opensearch.client.transport;
|
||||
|
||||
import org.opensearch.action.ActionType;
|
||||
import org.opensearch.action.ActionListener;
|
||||
import org.opensearch.action.ActionRequest;
|
||||
import org.opensearch.action.ActionRequestBuilder;
|
||||
import org.opensearch.action.ActionResponse;
|
||||
import org.opensearch.action.TransportActionNodeProxy;
|
||||
import org.opensearch.common.settings.Settings;
|
||||
import org.opensearch.transport.TransportService;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
|
||||
final class TransportProxyClient {
|
||||
|
||||
private final TransportClientNodesService nodesService;
|
||||
private final Map<ActionType, TransportActionNodeProxy> proxies;
|
||||
|
||||
TransportProxyClient(
|
||||
Settings settings,
|
||||
TransportService transportService,
|
||||
TransportClientNodesService nodesService,
|
||||
List<ActionType> actions
|
||||
) {
|
||||
this.nodesService = nodesService;
|
||||
Map<ActionType, TransportActionNodeProxy> proxies = new HashMap<>();
|
||||
for (ActionType action : actions) {
|
||||
proxies.put(action, new TransportActionNodeProxy(settings, action, transportService));
|
||||
}
|
||||
this.proxies = unmodifiableMap(proxies);
|
||||
}
|
||||
|
||||
public <
|
||||
Request extends ActionRequest,
|
||||
Response extends ActionResponse,
|
||||
RequestBuilder extends ActionRequestBuilder<Request, Response>> void execute(
|
||||
final ActionType<Response> action,
|
||||
final Request request,
|
||||
ActionListener<Response> listener
|
||||
) {
|
||||
final TransportActionNodeProxy<Request, Response> proxy = proxies.get(action);
|
||||
assert proxy != null : "no proxy found for action: " + action;
|
||||
nodesService.execute((n, l) -> proxy.execute(n, request, l), listener);
|
||||
}
|
||||
}
|
|
@@ -35,7 +35,6 @@ package org.opensearch.cluster;
|
|||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.opensearch.LegacyESVersion;
|
||||
import org.opensearch.client.transport.TransportClient;
|
||||
import org.opensearch.cluster.block.ClusterBlock;
|
||||
import org.opensearch.cluster.block.ClusterBlocks;
|
||||
import org.opensearch.cluster.coordination.CoordinationMetadata;
|
||||
|
@@ -119,15 +118,10 @@ public class ClusterState implements ToXContentFragment, Diffable<ClusterState>
|
|||
}
|
||||
|
||||
/**
|
||||
* Tests whether or not the custom should be serialized. The criteria are:
|
||||
* <ul>
|
||||
* <li>the output stream must be at least the minimum supported version of the custom</li>
|
||||
* <li>the output stream must have the feature required by the custom (if any) or not be a transport client</li>
|
||||
* </ul>
|
||||
* Tests whether the custom should be serialized. The criterion is that
|
||||
* the output stream must be at least the minimum supported version of the custom.
|
||||
* <p>
|
||||
* That is, we only serialize customs to clients that can understand the custom based on the version of the client and the features
|
||||
* that the client has. For transport clients we can be lenient in requiring a feature in which case we do not send the custom but
|
||||
* for connected nodes we always require that the node has the required feature.
|
||||
* That is, we only serialize customs to clients that can understand the custom based on the version of the client.
|
||||
*
|
||||
* @param out the output stream
|
||||
* @param custom the custom to serialize
|
||||
|
@@ -135,15 +129,7 @@ public class ClusterState implements ToXContentFragment, Diffable<ClusterState>
|
|||
* @return true if the custom should be serialized and false otherwise
|
||||
*/
|
||||
static <T extends VersionedNamedWriteable & FeatureAware> boolean shouldSerialize(final StreamOutput out, final T custom) {
|
||||
if (out.getVersion().before(custom.getMinimalSupportedVersion())) {
|
||||
return false;
|
||||
}
|
||||
if (custom.getRequiredFeature().isPresent()) {
|
||||
final String requiredFeature = custom.getRequiredFeature().get();
|
||||
// if it is a transport client we are lenient yet for a connected node it must have the required feature
|
||||
return out.hasFeature(requiredFeature) || out.hasFeature(TransportClient.TRANSPORT_CLIENT_FEATURE) == false;
|
||||
}
|
||||
return true;
|
||||
return out.getVersion().onOrAfter(custom.getMinimalSupportedVersion());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
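For readers checking the behavior change in shouldSerialize: with the transport-client feature gone, only the version branch remains, and the new one-liner is simply its negated form. A minimal sketch of the equivalence, assuming the usual Version ordering where before() and onOrAfter() are complements; the chosen versions are placeholders.

import org.opensearch.Version;

public final class VersionCheckExample {
    public static void main(String[] args) {
        Version streamVersion = Version.CURRENT;                                       // stands in for out.getVersion()
        Version minimalSupported = Version.CURRENT.minimumCompatibilityVersion();      // stands in for the custom's minimum
        boolean oldVersionBranch = streamVersion.before(minimalSupported) == false;    // old check, feature branch dropped
        boolean newCheck = streamVersion.onOrAfter(minimalSupported);                  // new one-liner
        System.out.println(oldVersionBranch == newCheck);                              // always true
    }
}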
|
@@ -91,7 +91,6 @@ public final class NetworkModule {
|
|||
public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope);
|
||||
|
||||
private final Settings settings;
|
||||
private final boolean transportClient;
|
||||
|
||||
private static final List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>();
|
||||
private static final List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>();
|
||||
|
@@ -134,11 +133,9 @@ public final class NetworkModule {
|
|||
/**
|
||||
* Creates a network module that custom networking classes can be plugged into.
|
||||
* @param settings The settings for the node
|
||||
* @param transportClient True if only transport classes should be allowed to be registered, false otherwise.
|
||||
*/
|
||||
public NetworkModule(
|
||||
Settings settings,
|
||||
boolean transportClient,
|
||||
List<NetworkPlugin> plugins,
|
||||
ThreadPool threadPool,
|
||||
BigArrays bigArrays,
|
||||
|
@@ -151,7 +148,6 @@ public final class NetworkModule {
|
|||
ClusterSettings clusterSettings
|
||||
) {
|
||||
this.settings = settings;
|
||||
this.transportClient = transportClient;
|
||||
for (NetworkPlugin plugin : plugins) {
|
||||
Map<String, Supplier<HttpServerTransport>> httpTransportFactory = plugin.getHttpTransports(
|
||||
settings,
|
||||
|
@@ -164,11 +160,9 @@ public final class NetworkModule {
|
|||
dispatcher,
|
||||
clusterSettings
|
||||
);
|
||||
if (transportClient == false) {
|
||||
for (Map.Entry<String, Supplier<HttpServerTransport>> entry : httpTransportFactory.entrySet()) {
|
||||
registerHttpTransport(entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
Map<String, Supplier<Transport>> transportFactory = plugin.getTransports(
|
||||
settings,
|
||||
threadPool,
|
||||
|
@@ -190,10 +184,6 @@ public final class NetworkModule {
|
|||
}
|
||||
}
|
||||
|
||||
public boolean isTransportClient() {
|
||||
return transportClient;
|
||||
}
|
||||
|
||||
/** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */
|
||||
private void registerTransport(String key, Supplier<Transport> factory) {
|
||||
if (transportFactories.putIfAbsent(key, factory) != null) {
|
||||
|
@@ -204,9 +194,6 @@ public final class NetworkModule {
|
|||
/** Adds an http transport implementation that can be selected by setting {@link #HTTP_TYPE_KEY}. */
|
||||
// TODO: we need another name than "http transport"....so confusing with transportClient...
|
||||
private void registerHttpTransport(String key, Supplier<HttpServerTransport> factory) {
|
||||
if (transportClient) {
|
||||
throw new IllegalArgumentException("Cannot register http transport " + key + " for transport client");
|
||||
}
|
||||
if (transportHttpFactories.putIfAbsent(key, factory) != null) {
|
||||
throw new IllegalArgumentException("transport for name: " + key + " is already registered");
|
||||
}
|
||||
|
@@ -215,7 +202,7 @@ public final class NetworkModule {
|
|||
/**
|
||||
* Register an allocation command.
|
||||
* <p>
|
||||
* This lives here instead of the more aptly named ClusterModule because the Transport client needs these to be registered.
|
||||
* This lives here instead of the more aptly named ClusterModule because the Transport client needed these to be registered.
|
||||
* </p>
|
||||
* @param reader the reader to read it from a stream
|
||||
* @param parser the parser to read it from XContent
|
||||
|
|
|
@@ -49,7 +49,6 @@ import org.opensearch.action.support.DestructiveOperations;
|
|||
import org.opensearch.action.support.replication.TransportReplicationAction;
|
||||
import org.opensearch.bootstrap.BootstrapSettings;
|
||||
import org.opensearch.client.Client;
|
||||
import org.opensearch.client.transport.TransportClient;
|
||||
import org.opensearch.cluster.ClusterModule;
|
||||
import org.opensearch.cluster.ClusterName;
|
||||
import org.opensearch.cluster.InternalClusterInfoService;
|
||||
|
@@ -217,10 +216,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
|||
new HashSet<>(
|
||||
Arrays.asList(
|
||||
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
|
||||
TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL,
|
||||
TransportClient.CLIENT_TRANSPORT_PING_TIMEOUT,
|
||||
TransportClient.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
|
||||
TransportClient.CLIENT_TRANSPORT_SNIFF,
|
||||
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
|
||||
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
|
||||
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
|
||||
|
|
|
@@ -54,8 +54,8 @@ public class InternalSettingsPreparer {
|
|||
private static final String TEXT_PROMPT_VALUE = "${prompt.text}";
|
||||
|
||||
/**
|
||||
* Prepares settings for the transport client by gathering all
|
||||
* opensearch system properties and setting defaults.
|
||||
* TODO: Refactor this now that the transport client is removed; this used to prepare settings for the transport client by
|
||||
* gathering all opensearch system properties and setting defaults.
|
||||
*/
|
||||
public static Settings prepareSettings(Settings input) {
|
||||
Settings.Builder output = Settings.builder();
|
||||
|
|
|
@@ -533,7 +533,7 @@ public class Node implements Closeable {
|
|||
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
|
||||
modules.add(indicesModule);
|
||||
|
||||
SearchModule searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class));
|
||||
SearchModule searchModule = new SearchModule(settings, pluginsService.filterPlugins(SearchPlugin.class));
|
||||
List<BreakerSettings> pluginCircuitBreakers = pluginsService.filterPlugins(CircuitBreakerPlugin.class)
|
||||
.stream()
|
||||
.map(plugin -> plugin.getCircuitBreaker(settings))
|
||||
|
@@ -683,7 +683,6 @@ public class Node implements Closeable {
|
|||
.collect(Collectors.toList());
|
||||
|
||||
ActionModule actionModule = new ActionModule(
|
||||
false,
|
||||
settings,
|
||||
clusterModule.getIndexNameExpressionResolver(),
|
||||
settingsModule.getIndexScopedSettings(),
|
||||
|
@@ -701,7 +700,6 @@ public class Node implements Closeable {
|
|||
final RestController restController = actionModule.getRestController();
|
||||
final NetworkModule networkModule = new NetworkModule(
|
||||
settings,
|
||||
false,
|
||||
pluginsService.filterPlugins(NetworkPlugin.class),
|
||||
threadPool,
|
||||
bigArrays,
|
||||
|
|
|
@@ -60,7 +60,7 @@ import static java.util.Objects.requireNonNull;
|
|||
|
||||
/**
|
||||
* This component is responsible for coordination of execution of persistent tasks on individual nodes. It runs on all
|
||||
* non-transport client nodes in the cluster and monitors cluster state changes to detect started commands.
|
||||
* nodes in the cluster and monitors cluster state changes to detect started commands.
|
||||
*/
|
||||
public class PersistentTasksNodeService implements ClusterStateListener {
|
||||
|
||||
|
|
|
@@ -135,7 +135,7 @@ public class PluginsService implements ReportingService<PluginsAndModules> {
|
|||
List<PluginInfo> pluginsList = new ArrayList<>();
|
||||
// we need to build a List of plugins for checking mandatory plugins
|
||||
final List<String> pluginsNames = new ArrayList<>();
|
||||
// first we load plugins that are on the classpath. this is for tests and transport clients
|
||||
// first we load plugins that are on the classpath. this is for tests
|
||||
for (Class<? extends Plugin> pluginClass : classpathPlugins) {
|
||||
Plugin plugin = loadPlugin(pluginClass, settings, configPath);
|
||||
PluginInfo pluginInfo = new PluginInfo(
|
||||
|
|
|
@@ -61,7 +61,6 @@ public final class SearchHits implements Writeable, ToXContentFragment, Iterable
|
|||
}
|
||||
|
||||
public static SearchHits empty(boolean withTotalHits) {
|
||||
// We shouldn't use a static final instance, since that could directly be returned by native transport clients
|
||||
return new SearchHits(EMPTY, withTotalHits ? new TotalHits(0, Relation.EQUAL_TO) : null, 0);
|
||||
}
|
||||
|
||||
|
|
|
@@ -318,7 +318,6 @@ public class SearchModule {
|
|||
Setting.Property.NodeScope
|
||||
);
|
||||
|
||||
private final boolean transportClient;
|
||||
private final Map<String, Highlighter> highlighters;
|
||||
private final ParseFieldRegistry<MovAvgModel.AbstractModelParser> movingAverageModelParserRegistry = new ParseFieldRegistry<>(
|
||||
"moving_avg_model"
|
||||
|
@@ -337,12 +336,10 @@ public class SearchModule {
|
|||
* NOTE: This constructor should not be called in production unless an accurate {@link Settings} object is provided.
|
||||
* When constructed, a static flag is set in Lucene {@link BooleanQuery#setMaxClauseCount} according to the settings.
|
||||
* @param settings Current settings
|
||||
* @param transportClient Is this being constructed in the TransportClient or not
|
||||
* @param plugins List of included {@link SearchPlugin} objects.
|
||||
*/
|
||||
public SearchModule(Settings settings, boolean transportClient, List<SearchPlugin> plugins) {
|
||||
public SearchModule(Settings settings, List<SearchPlugin> plugins) {
|
||||
this.settings = settings;
|
||||
this.transportClient = transportClient;
|
||||
registerSuggesters(plugins);
|
||||
highlighters = setupHighlighters(settings, plugins);
|
||||
registerScoreFunctions(plugins);
|
||||
|
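For plugin or test code tracking this signature change: the transportClient flag is gone, so callers simply drop the middle boolean. A hedged sketch of the before/after call, using empty settings and no plugins purely for illustration; the wrapper class is made up.

import java.util.Collections;

import org.opensearch.common.settings.Settings;
import org.opensearch.search.SearchModule;

public final class SearchModuleExample {
    public static SearchModule build() {
        // Before this commit: new SearchModule(Settings.EMPTY, false, Collections.emptyList());
        // After: the boolean transportClient parameter is removed.
        return new SearchModule(Settings.EMPTY, Collections.emptyList());
    }
}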
@@ -693,12 +690,10 @@ public class SearchModule {
|
|||
}
|
||||
|
||||
private void registerAggregation(AggregationSpec spec, ValuesSourceRegistry.Builder builder) {
|
||||
if (false == transportClient) {
|
||||
namedXContents.add(new NamedXContentRegistry.Entry(BaseAggregationBuilder.class, spec.getName(), (p, c) -> {
|
||||
String name = (String) c;
|
||||
return spec.getParser().parse(p, name);
|
||||
}));
|
||||
}
|
||||
namedWriteables.add(
|
||||
new NamedWriteableRegistry.Entry(AggregationBuilder.class, spec.getName().getPreferredName(), spec.getReader())
|
||||
);
|
||||
|
@@ -853,15 +848,9 @@ public class SearchModule {
|
|||
}
|
||||
|
||||
private void registerPipelineAggregation(PipelineAggregationSpec spec) {
|
||||
if (false == transportClient) {
|
||||
namedXContents.add(
|
||||
new NamedXContentRegistry.Entry(
|
||||
BaseAggregationBuilder.class,
|
||||
spec.getName(),
|
||||
(p, c) -> spec.getParser().parse(p, (String) c)
|
||||
)
|
||||
new NamedXContentRegistry.Entry(BaseAggregationBuilder.class, spec.getName(), (p, c) -> spec.getParser().parse(p, (String) c))
|
||||
);
|
||||
}
|
||||
namedWriteables.add(
|
||||
new NamedWriteableRegistry.Entry(PipelineAggregationBuilder.class, spec.getName().getPreferredName(), spec.getReader())
|
||||
);
|
||||
|
@@ -889,9 +878,7 @@ public class SearchModule {
|
|||
}
|
||||
|
||||
private void registerRescorer(RescorerSpec<?> spec) {
|
||||
if (false == transportClient) {
|
||||
namedXContents.add(new NamedXContentRegistry.Entry(RescorerBuilder.class, spec.getName(), (p, c) -> spec.getParser().apply(p)));
|
||||
}
|
||||
namedWriteables.add(new NamedWriteableRegistry.Entry(RescorerBuilder.class, spec.getName().getPreferredName(), spec.getReader()));
|
||||
}
|
||||
|
||||
|
|
|
@@ -346,7 +346,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
for (int i = 0; i < numConnections; ++i) {
|
||||
try {
|
||||
TcpChannel channel = initiateChannel(node);
|
||||
logger.trace(() -> new ParameterizedMessage("Tcp transport client channel opened: {}", channel));
|
||||
logger.trace(() -> new ParameterizedMessage("Tcp transport channel opened: {}", channel));
|
||||
channels.add(channel);
|
||||
} catch (ConnectTransportException e) {
|
||||
CloseableChannel.closeChannels(channels, false);
|
||||
|
|
|
@@ -40,8 +40,6 @@ import org.opensearch.Version;
|
|||
import org.opensearch.action.ActionListener;
|
||||
import org.opensearch.action.ActionListenerResponseHandler;
|
||||
import org.opensearch.action.support.PlainActionFuture;
|
||||
import org.opensearch.client.Client;
|
||||
import org.opensearch.client.transport.TransportClient;
|
||||
import org.opensearch.cluster.ClusterName;
|
||||
import org.opensearch.cluster.node.DiscoveryNode;
|
||||
import org.opensearch.cluster.node.DiscoveryNodes;
|
||||
|
@ -135,8 +133,6 @@ public class TransportService extends AbstractLifecycleComponent
|
|||
|
||||
private final RemoteClusterService remoteClusterService;
|
||||
|
||||
private final boolean validateConnections;
|
||||
|
||||
/** if set will call requests sent to this id to shortcut and executed locally */
|
||||
volatile DiscoveryNode localNode = null;
|
||||
private final Transport.Connection localNodeConnection = new Transport.Connection() {
|
||||
|
@ -200,9 +196,6 @@ public class TransportService extends AbstractLifecycleComponent
|
|||
Set<String> taskHeaders,
|
||||
ConnectionManager connectionManager
|
||||
) {
|
||||
// The only time we do not want to validate node connections is when this is a transport client using the simple node sampler
|
||||
this.validateConnections = TransportClient.CLIENT_TYPE.equals(settings.get(Client.CLIENT_TYPE_SETTING_S.getKey())) == false
|
||||
|| TransportClient.CLIENT_TRANSPORT_SNIFF.get(settings);
|
||||
this.transport = transport;
|
||||
transport.setSlowLogThreshold(TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING.get(settings));
|
||||
this.threadPool = threadPool;
|
||||
|
@ -435,7 +428,7 @@ public class TransportService extends AbstractLifecycleComponent
|
|||
// We don't validate cluster names to allow for CCS connections.
|
||||
handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> {
|
||||
final DiscoveryNode remote = resp.discoveryNode;
|
||||
if (validateConnections && node.equals(remote) == false) {
|
||||
if (node.equals(remote) == false) {
|
||||
throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote);
|
||||
}
|
||||
return null;
|
||||
|
|
|
@@ -126,7 +126,6 @@ public class ActionModuleTests extends OpenSearchTestCase {
SettingsModule settings = new SettingsModule(Settings.EMPTY);
UsageService usageService = new UsageService();
ActionModule actionModule = new ActionModule(
-false,
settings.getSettings(),
new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)),
settings.getIndexScopedSettings(),

@@ -183,7 +182,6 @@ public class ActionModuleTests extends OpenSearchTestCase {
try {
UsageService usageService = new UsageService();
ActionModule actionModule = new ActionModule(
-false,
settings.getSettings(),
new IndexNameExpressionResolver(threadPool.getThreadContext()),
settings.getIndexScopedSettings(),

@@ -233,7 +231,6 @@ public class ActionModuleTests extends OpenSearchTestCase {
try {
UsageService usageService = new UsageService();
ActionModule actionModule = new ActionModule(
-false,
settings.getSettings(),
new IndexNameExpressionResolver(threadPool.getThreadContext()),
settings.getIndexScopedSettings(),
@@ -56,7 +56,7 @@ public class ShardValidateQueryRequestTests extends OpenSearchTestCase {
public void setUp() throws Exception {
super.setUp();
IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());

@@ -92,7 +92,7 @@ public class ClusterSearchShardsResponseTests extends OpenSearchTestCase {
indicesAndFilters
);

-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(searchModule.getNamedWriteables());
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);

@@ -59,7 +59,7 @@ public class ExplainRequestTests extends OpenSearchTestCase {
public void setUp() throws Exception {
super.setUp();
IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());

@@ -121,9 +121,7 @@ public class SearchPhaseControllerTests extends OpenSearchTestCase {

@Override
protected NamedWriteableRegistry writableRegistry() {
-List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(
-new SearchModule(Settings.EMPTY, false, emptyList()).getNamedWriteables()
-);
+List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(new SearchModule(Settings.EMPTY, emptyList()).getNamedWriteables());
return new NamedWriteableRegistry(entries);
}

@@ -83,7 +83,7 @@ public class SearchResponseTests extends OpenSearchTestCase {
}

private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(
-new SearchModule(Settings.EMPTY, false, emptyList()).getNamedWriteables()
+new SearchModule(Settings.EMPTY, emptyList()).getNamedWriteables()
);
private AggregationsTests aggregationsTests = new AggregationsTests();
@@ -121,14 +121,8 @@ public class NetworkModuleTests extends OpenSearchTestCase {
return Collections.singletonMap("custom", custom);
}
};
-NetworkModule module = newNetworkModule(settings, false, plugin);
-assertFalse(module.isTransportClient());
+NetworkModule module = newNetworkModule(settings, plugin);
assertSame(custom, module.getTransportSupplier());
-
-// check it works with transport only as well
-module = newNetworkModule(settings, true, plugin);
-assertSame(custom, module.getTransportSupplier());
-assertTrue(module.isTransportClient());
}

public void testRegisterHttpTransport() {

@@ -138,7 +132,7 @@ public class NetworkModuleTests extends OpenSearchTestCase {
.build();
Supplier<HttpServerTransport> custom = FakeHttpTransport::new;

-NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() {
+NetworkModule module = newNetworkModule(settings, new NetworkPlugin() {
@Override
public Map<String, Supplier<HttpServerTransport>> getHttpTransports(
Settings settings,

@@ -155,11 +149,9 @@ public class NetworkModuleTests extends OpenSearchTestCase {
}
});
assertSame(custom, module.getHttpServerTransportSupplier());
-assertFalse(module.isTransportClient());

settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
-NetworkModule newModule = newNetworkModule(settings, false);
-assertFalse(newModule.isTransportClient());
+NetworkModule newModule = newNetworkModule(settings);
expectThrows(IllegalStateException.class, () -> newModule.getHttpServerTransportSupplier());
}

@@ -173,7 +165,7 @@ public class NetworkModuleTests extends OpenSearchTestCase {
Supplier<Transport> customTransport = () -> null; // content doesn't matter we check reference equality
Supplier<HttpServerTransport> custom = FakeHttpTransport::new;
Supplier<HttpServerTransport> def = FakeHttpTransport::new;
-NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() {
+NetworkModule module = newNetworkModule(settings, new NetworkPlugin() {
@Override
public Map<String, Supplier<Transport>> getTransports(
Settings settings,

@@ -216,7 +208,7 @@ public class NetworkModuleTests extends OpenSearchTestCase {
Supplier<HttpServerTransport> custom = FakeHttpTransport::new;
Supplier<HttpServerTransport> def = FakeHttpTransport::new;
Supplier<Transport> customTransport = () -> null;
-NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() {
+NetworkModule module = newNetworkModule(settings, new NetworkPlugin() {
@Override
public Map<String, Supplier<Transport>> getTransports(
Settings settings,

@@ -273,7 +265,7 @@ public class NetworkModuleTests extends OpenSearchTestCase {
return actualHandler;
}
};
-NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() {
+NetworkModule module = newNetworkModule(settings, new NetworkPlugin() {
@Override
public List<TransportInterceptor> getTransportInterceptors(
NamedWriteableRegistry namedWriteableRegistry,

@@ -295,7 +287,7 @@ public class NetworkModuleTests extends OpenSearchTestCase {
assertSame(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.get(0), interceptor);

NullPointerException nullPointerException = expectThrows(NullPointerException.class, () -> {
-newNetworkModule(settings, false, new NetworkPlugin() {
+newNetworkModule(settings, new NetworkPlugin() {
@Override
public List<TransportInterceptor> getTransportInterceptors(
NamedWriteableRegistry namedWriteableRegistry,

@@ -309,10 +301,9 @@ public class NetworkModuleTests extends OpenSearchTestCase {
assertEquals("interceptor must not be null", nullPointerException.getMessage());
}

-private NetworkModule newNetworkModule(Settings settings, boolean transportClient, NetworkPlugin... plugins) {
+private NetworkModule newNetworkModule(Settings settings, NetworkPlugin... plugins) {
return new NetworkModule(
settings,
-transportClient,
Arrays.asList(plugins),
threadPool,
null,
@@ -53,7 +53,7 @@ public class AbstractQueryBuilderTests extends OpenSearchTestCase {

@BeforeClass
public static void init() {
-xContentRegistry = new NamedXContentRegistry(new SearchModule(Settings.EMPTY, false, emptyList()).getNamedXContents());
+xContentRegistry = new NamedXContentRegistry(new SearchModule(Settings.EMPTY, emptyList()).getNamedXContents());
}

@AfterClass

@@ -80,7 +80,7 @@ public class InnerHitBuilderTests extends OpenSearchTestCase {

@BeforeClass
public static void init() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
}

@@ -65,13 +65,13 @@ public class ReindexRequestTests extends AbstractBulkByScrollRequestTestCase<Rei

@Override
protected NamedWriteableRegistry writableRegistry() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
return new NamedWriteableRegistry(searchModule.getNamedWriteables());
}

@Override
protected NamedXContentRegistry xContentRegistry() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
return new NamedXContentRegistry(searchModule.getNamedXContents());
}

@@ -67,7 +67,7 @@ public class RestActionsTests extends OpenSearchTestCase {

@BeforeClass
public static void init() {
-xContentRegistry = new NamedXContentRegistry(new SearchModule(Settings.EMPTY, false, emptyList()).getNamedXContents());
+xContentRegistry = new NamedXContentRegistry(new SearchModule(Settings.EMPTY, emptyList()).getNamedXContents());
}

@AfterClass

@@ -74,7 +74,7 @@ public abstract class AbstractSearchTestCase extends OpenSearchTestCase {
super.setUp();
IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
searchExtPlugin = new TestSearchExtPlugin();
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.singletonList(searchExtPlugin));
+SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.singletonList(searchExtPlugin));
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
@@ -213,11 +213,11 @@ public class SearchModuleTests extends OpenSearchTestCase {
}

private ThrowingRunnable registryForPlugin(SearchPlugin plugin) {
-return () -> new NamedXContentRegistry(new SearchModule(Settings.EMPTY, false, singletonList(plugin)).getNamedXContents());
+return () -> new NamedXContentRegistry(new SearchModule(Settings.EMPTY, singletonList(plugin)).getNamedXContents());
}

public void testRegisterSuggester() {
-SearchModule module = new SearchModule(Settings.EMPTY, false, singletonList(new SearchPlugin() {
+SearchModule module = new SearchModule(Settings.EMPTY, singletonList(new SearchPlugin() {
@Override
public List<SuggesterSpec<?>> getSuggesters() {
return singletonList(

@@ -314,7 +314,7 @@ public class SearchModuleTests extends OpenSearchTestCase {

public void testRegisterHighlighter() {
CustomHighlighter customHighlighter = new CustomHighlighter();
-SearchModule module = new SearchModule(Settings.EMPTY, false, singletonList(new SearchPlugin() {
+SearchModule module = new SearchModule(Settings.EMPTY, singletonList(new SearchPlugin() {
@Override
public Map<String, Highlighter> getHighlighters() {
return singletonMap("custom", customHighlighter);

@@ -332,7 +332,7 @@ public class SearchModuleTests extends OpenSearchTestCase {
List<String> allSupportedQueries = new ArrayList<>();
Collections.addAll(allSupportedQueries, NON_DEPRECATED_QUERIES);
Collections.addAll(allSupportedQueries, DEPRECATED_QUERIES);
-SearchModule module = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule module = new SearchModule(Settings.EMPTY, emptyList());

Set<String> registeredNonDeprecated = module.getNamedXContents()
.stream()

@@ -351,7 +351,7 @@ public class SearchModuleTests extends OpenSearchTestCase {
}

public void testRegisterAggregation() {
-SearchModule module = new SearchModule(Settings.EMPTY, false, singletonList(new SearchPlugin() {
+SearchModule module = new SearchModule(Settings.EMPTY, singletonList(new SearchPlugin() {
@Override
public List<AggregationSpec> getAggregations() {
return singletonList(new AggregationSpec("test", TestAggregationBuilder::new, TestAggregationBuilder::fromXContent));

@@ -371,7 +371,7 @@ public class SearchModuleTests extends OpenSearchTestCase {
}

public void testRegisterPipelineAggregation() {
-SearchModule module = new SearchModule(Settings.EMPTY, false, singletonList(new SearchPlugin() {
+SearchModule module = new SearchModule(Settings.EMPTY, singletonList(new SearchPlugin() {
@Override
public List<PipelineAggregationSpec> getPipelineAggregations() {
return singletonList(

@@ -398,7 +398,7 @@ public class SearchModuleTests extends OpenSearchTestCase {
}

public void testRegisterRescorer() {
-SearchModule module = new SearchModule(Settings.EMPTY, false, singletonList(new SearchPlugin() {
+SearchModule module = new SearchModule(Settings.EMPTY, singletonList(new SearchPlugin() {
@Override
public List<RescorerSpec<?>> getRescorers() {
return singletonList(new RescorerSpec<>("test", TestRescorerBuilder::new, TestRescorerBuilder::fromXContent));
@@ -49,7 +49,7 @@ public class SearchSortValuesAndFormatsTests extends AbstractWireSerializingTest

@Before
public void initRegistry() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(searchModule.getNamedWriteables());
namedWriteableRegistry = new NamedWriteableRegistry(entries);

@@ -62,7 +62,7 @@ public class AggregatorFactoriesBuilderTests extends AbstractSerializingTestCase
super.setUp();

// register aggregations as NamedWriteable
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
namedXContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
}

@@ -86,7 +86,7 @@ public class AggregatorFactoriesTests extends OpenSearchTestCase {
.put("node.name", AbstractQueryTestCase.class.toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.build();
-xContentRegistry = new NamedXContentRegistry(new SearchModule(settings, false, emptyList()).getNamedXContents());
+xContentRegistry = new NamedXContentRegistry(new SearchModule(settings, emptyList()).getNamedXContents());
}

public void testGetAggregatorFactories_returnsUnmodifiableList() {
@@ -64,7 +64,7 @@ import static org.hamcrest.Matchers.equalTo;
public class InternalAggregationsTests extends OpenSearchTestCase {

private final NamedWriteableRegistry registry = new NamedWriteableRegistry(
-new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedWriteables()
+new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables()
);

public void testReduceEmptyAggs() {

@@ -103,7 +103,7 @@ public class SignificanceHeuristicTests extends OpenSearchTestCase {
// read
ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
StreamInput in = new InputStreamStreamInput(inBuffer);
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); // populates the registry through side effects
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList()); // populates the registry through side effects
NamedWriteableRegistry registry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
in = new NamedWriteableAwareStreamInput(in, registry);
in.setVersion(version);

@@ -549,6 +549,6 @@ public class SignificanceHeuristicTests extends OpenSearchTestCase {

@Override
protected NamedXContentRegistry xContentRegistry() {
-return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, false, emptyList()).getNamedXContents());
+return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, emptyList()).getNamedXContents());
}
}

@@ -58,7 +58,7 @@ public class WeightedAvgAggregationBuilderTests extends AbstractSerializingTestC

@Override
protected NamedXContentRegistry xContentRegistry() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
return new NamedXContentRegistry(searchModule.getNamedXContents());
}
@@ -90,11 +90,11 @@ public class MultiValuesSourceFieldConfigTests extends AbstractSerializingTestCa

@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
-return new NamedWriteableRegistry(new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedWriteables());
+return new NamedWriteableRegistry(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables());
}

@Override
protected NamedXContentRegistry xContentRegistry() {
-return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents());
+return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents());
}
}

@@ -73,7 +73,7 @@ public class CollapseBuilderTests extends AbstractSerializingTestCase<CollapseBu

@BeforeClass
public static void init() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
}

@@ -98,7 +98,7 @@ public class HighlightBuilderTests extends OpenSearchTestCase {
*/
@BeforeClass
public static void init() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
}

@@ -70,7 +70,7 @@ public class QuerySearchResultTests extends OpenSearchTestCase {
private final NamedWriteableRegistry namedWriteableRegistry;

public QuerySearchResultTests() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
this.namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
}
@@ -83,7 +83,7 @@ public class QueryRescorerBuilderTests extends OpenSearchTestCase {
*/
@BeforeClass
public static void init() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
}

@@ -103,7 +103,7 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends Ope
ScriptEngine engine = new MockScriptEngine(MockScriptEngine.NAME, scripts, Collections.emptyMap());
scriptService = new ScriptService(baseSettings, Collections.singletonMap(engine.getType(), engine), ScriptModule.CORE_CONTEXTS);

-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
}

@@ -64,7 +64,7 @@ public class NestedSortBuilderTests extends OpenSearchTestCase {

@BeforeClass
public static void init() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
}

@@ -63,7 +63,7 @@ public class SortBuilderTests extends OpenSearchTestCase {

@BeforeClass
public static void init() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
}
@@ -84,7 +84,7 @@ public abstract class AbstractSuggestionBuilderTestCase<SB extends SuggestionBui
*/
@BeforeClass
public static void init() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
}

@@ -65,7 +65,7 @@ public class SuggestBuilderTests extends OpenSearchTestCase {
*/
@BeforeClass
public static void init() {
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
+SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
}

@@ -276,9 +276,7 @@ public class SuggestTests extends OpenSearchTestCase {
final Suggest suggest = createTestItem();
final Suggest bwcSuggest;

-NamedWriteableRegistry registry = new NamedWriteableRegistry(
-new SearchModule(Settings.EMPTY, false, emptyList()).getNamedWriteables()
-);
+NamedWriteableRegistry registry = new NamedWriteableRegistry(new SearchModule(Settings.EMPTY, emptyList()).getNamedWriteables());

try (BytesStreamOutput out = new BytesStreamOutput()) {
out.setVersion(bwcVersion);
@@ -35,8 +35,6 @@ package org.opensearch.transport;
import org.opensearch.Version;
import org.opensearch.action.ActionListener;
import org.opensearch.action.support.PlainActionFuture;
-import org.opensearch.client.Client;
-import org.opensearch.client.transport.TransportClient;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.common.io.stream.NamedWriteableRegistry;
import org.opensearch.common.network.NetworkService;
@@ -225,45 +223,6 @@ public class TransportServiceHandshakeTests extends OpenSearchTestCase {
assertFalse(handleA.transportService.nodeConnected(discoveryNode));
}
-
-public void testNodeConnectWithDifferentNodeIdSucceedsIfThisIsTransportClientOfSimpleNodeSampler() {
-Settings.Builder settings = Settings.builder().put("cluster.name", "test");
-Settings transportClientSettings = settings.put(Client.CLIENT_TYPE_SETTING_S.getKey(), TransportClient.CLIENT_TYPE).build();
-NetworkHandle handleA = startServices("TS_A", transportClientSettings, Version.CURRENT);
-NetworkHandle handleB = startServices("TS_B", settings.build(), Version.CURRENT);
-DiscoveryNode discoveryNode = new DiscoveryNode(
-randomAlphaOfLength(10),
-handleB.discoveryNode.getAddress(),
-emptyMap(),
-emptySet(),
-handleB.discoveryNode.getVersion()
-);
-
-handleA.transportService.connectToNode(discoveryNode, TestProfiles.LIGHT_PROFILE);
-assertTrue(handleA.transportService.nodeConnected(discoveryNode));
-}
-
-public void testNodeConnectWithDifferentNodeIdFailsWhenSnifferTransportClient() {
-Settings.Builder settings = Settings.builder().put("cluster.name", "test");
-Settings transportClientSettings = settings.put(Client.CLIENT_TYPE_SETTING_S.getKey(), TransportClient.CLIENT_TYPE)
-.put(TransportClient.CLIENT_TRANSPORT_SNIFF.getKey(), true)
-.build();
-NetworkHandle handleA = startServices("TS_A", transportClientSettings, Version.CURRENT);
-NetworkHandle handleB = startServices("TS_B", settings.build(), Version.CURRENT);
-DiscoveryNode discoveryNode = new DiscoveryNode(
-randomAlphaOfLength(10),
-handleB.discoveryNode.getAddress(),
-emptyMap(),
-emptySet(),
-handleB.discoveryNode.getVersion()
-);
-ConnectTransportException ex = expectThrows(
-ConnectTransportException.class,
-() -> { handleA.transportService.connectToNode(discoveryNode, TestProfiles.LIGHT_PROFILE); }
-);
-assertThat(ex.getMessage(), containsString("unexpected remote node"));
-assertFalse(handleA.transportService.nodeConnected(discoveryNode));
-}

private static class NetworkHandle {
private TransportService transportService;
private DiscoveryNode discoveryNode;
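With the simple-node-sampler exception removed, the handshake's node-identity check is unconditional, so the surviving coverage follows the shape of the removed sniffing test minus any client-type settings. A rough sketch only, reusing the helpers visible in this test class (startServices, NetworkHandle and TestProfiles are assumed to exist exactly as shown above):

// Sketch: a mismatched node id is now expected to fail regardless of client settings.
Settings settings = Settings.builder().put("cluster.name", "test").build();
NetworkHandle handleA = startServices("TS_A", settings, Version.CURRENT);
NetworkHandle handleB = startServices("TS_B", settings, Version.CURRENT);
DiscoveryNode fakeNode = new DiscoveryNode(
    randomAlphaOfLength(10),
    handleB.discoveryNode.getAddress(),
    emptyMap(),
    emptySet(),
    handleB.discoveryNode.getVersion()
);
ConnectTransportException ex = expectThrows(
    ConnectTransportException.class,
    () -> handleA.transportService.connectToNode(fakeNode, TestProfiles.LIGHT_PROFILE)
);
assertThat(ex.getMessage(), containsString("unexpected remote node"));
assertFalse(handleA.transportService.nodeConnected(fakeNode));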
@@ -26,7 +26,6 @@ List projects = [
'client:rest',
'client:rest-high-level',
'client:sniffer',
-'client:transport',
'client:test',
'client:client-benchmark-noop-api-plugin',
'client:benchmark',
@@ -214,7 +214,7 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
public void initValuesSourceRegistry() {
List<SearchPlugin> plugins = new ArrayList<>(getSearchPlugins());
plugins.add(new AggCardinalityPlugin());
-SearchModule searchModule = new SearchModule(Settings.EMPTY, false, plugins);
+SearchModule searchModule = new SearchModule(Settings.EMPTY, plugins);
valuesSourceRegistry = searchModule.getValuesSourceRegistry();
}

@@ -93,7 +93,7 @@ public abstract class BasePipelineAggregationTestCase<AF extends AbstractPipelin
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.build();
IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
-SearchModule searchModule = new SearchModule(settings, false, plugins());
+SearchModule searchModule = new SearchModule(settings, plugins());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());

@@ -387,7 +387,7 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
pluginsService.getPluginSettingsFilter(),
Collections.emptySet()
);
-searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class));
+searchModule = new SearchModule(nodeSettings, pluginsService.filterPlugins(SearchPlugin.class));
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
@@ -322,11 +322,7 @@ public abstract class InternalAggregationTestCase<T extends InternalAggregation>
*/
protected List<NamedWriteableRegistry.Entry> getNamedWriteables() {
SearchPlugin plugin = registerPlugin();
-SearchModule searchModule = new SearchModule(
-Settings.EMPTY,
-false,
-plugin == null ? emptyList() : Collections.singletonList(plugin)
-);
+SearchModule searchModule = new SearchModule(Settings.EMPTY, plugin == null ? emptyList() : Collections.singletonList(plugin));
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(searchModule.getNamedWriteables());

// Modules/plugins may have extra namedwriteables that are not added by agg specs
@@ -50,11 +50,6 @@ public abstract class NodeConfigurationSource {
public Path nodeConfigPath(int nodeOrdinal) {
return null;
}
-
-@Override
-public Settings transportClientSettings() {
-return Settings.EMPTY;
-}
};

/**

@@ -68,9 +63,4 @@ public abstract class NodeConfigurationSource {
public Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.emptyList();
}
-
-public Settings transportClientSettings() {
-return Settings.EMPTY;
-}
-
}
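After this change an anonymous NodeConfigurationSource only supplies node-level configuration. A hypothetical sketch of what a test now implements (nodeSettings(int) is assumed to be the remaining abstract method alongside the nodeConfigPath shown above):

NodeConfigurationSource source = new NodeConfigurationSource() {
    @Override
    public Settings nodeSettings(int nodeOrdinal) {
        return Settings.EMPTY; // per-node overrides go here
    }

    @Override
    public Path nodeConfigPath(int nodeOrdinal) {
        return null; // no external config directory
    }
};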