From 865b951b7d5ba0acfa51bae2c12416044642338d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 29 Jun 2016 15:35:55 -0700 Subject: [PATCH 01/36] Internal: Changed rest handler interface to take NodeClient Previously all rest handlers would take Client in their injected ctor. However, it was only to hold the client around for runtime. Instead, this can be done just once in the HttpService which handles rest requests, and passed along through the handleRequest method. It also should always be a NodeClient, and other types of Clients (eg a TransportClient) would not work anyways (and some handlers can be simplified in follow ups like reindex by taking NodeClient). --- .../org/elasticsearch/http/HttpServer.java | 18 +++++--- .../java/org/elasticsearch/node/Node.java | 1 + .../elasticsearch/rest/BaseRestHandler.java | 12 +---- .../elasticsearch/rest/RestController.java | 21 ++++----- .../org/elasticsearch/rest/RestFilter.java | 4 +- .../elasticsearch/rest/RestFilterChain.java | 4 +- .../org/elasticsearch/rest/RestHandler.java | 11 ++++- .../RestClusterAllocationExplainAction.java | 13 +++--- .../health/RestClusterHealthAction.java | 8 ++-- .../hotthreads/RestNodesHotThreadsAction.java | 8 ++-- .../node/info/RestNodesInfoAction.java | 8 ++-- .../node/stats/RestNodesStatsAction.java | 8 ++-- .../node/tasks/RestCancelTasksAction.java | 8 ++-- .../cluster/node/tasks/RestGetTaskAction.java | 8 ++-- .../node/tasks/RestListTasksAction.java | 8 ++-- .../delete/RestDeleteRepositoryAction.java | 8 ++-- .../get/RestGetRepositoriesAction.java | 8 ++-- .../put/RestPutRepositoryAction.java | 8 ++-- .../verify/RestVerifyRepositoryAction.java | 8 ++-- .../reroute/RestClusterRerouteAction.java | 8 ++-- .../RestClusterGetSettingsAction.java | 8 ++-- .../RestClusterUpdateSettingsAction.java | 8 ++-- .../shards/RestClusterSearchShardsAction.java | 8 ++-- .../create/RestCreateSnapshotAction.java | 8 ++-- .../delete/RestDeleteSnapshotAction.java | 8 ++-- 
.../snapshots/get/RestGetSnapshotsAction.java | 8 ++-- .../restore/RestRestoreSnapshotAction.java | 8 ++-- .../status/RestSnapshotsStatusAction.java | 8 ++-- .../cluster/state/RestClusterStateAction.java | 8 ++-- .../cluster/stats/RestClusterStatsAction.java | 8 ++-- .../RestDeleteStoredScriptAction.java | 12 ++--- .../RestGetStoredScriptAction.java | 12 ++--- .../RestPutStoredScriptAction.java | 12 ++--- .../tasks/RestPendingClusterTasksAction.java | 8 ++-- .../indices/RestRolloverIndexAction.java | 8 ++-- .../admin/indices/RestShrinkIndexAction.java | 8 ++-- .../alias/RestIndicesAliasesAction.java | 8 ++-- .../delete/RestIndexDeleteAliasesAction.java | 8 ++-- .../alias/get/RestGetAliasesAction.java | 8 ++-- .../alias/head/RestAliasesExistAction.java | 8 ++-- .../alias/put/RestIndexPutAliasAction.java | 8 ++-- .../indices/analyze/RestAnalyzeAction.java | 8 ++-- .../clear/RestClearIndicesCacheAction.java | 8 ++-- .../indices/close/RestCloseIndexAction.java | 8 ++-- .../indices/create/RestCreateIndexAction.java | 8 ++-- .../indices/delete/RestDeleteIndexAction.java | 8 ++-- .../indices/RestIndicesExistsAction.java | 8 ++-- .../exists/types/RestTypesExistsAction.java | 8 ++-- .../admin/indices/flush/RestFlushAction.java | 8 ++-- .../indices/flush/RestSyncedFlushAction.java | 8 ++-- .../forcemerge/RestForceMergeAction.java | 8 ++-- .../indices/get/RestGetIndicesAction.java | 8 ++-- .../get/RestGetFieldMappingAction.java | 8 ++-- .../mapping/get/RestGetMappingAction.java | 8 ++-- .../mapping/put/RestPutMappingAction.java | 8 ++-- .../indices/open/RestOpenIndexAction.java | 8 ++-- .../indices/recovery/RestRecoveryAction.java | 8 ++-- .../indices/refresh/RestRefreshAction.java | 8 ++-- .../segments/RestIndicesSegmentsAction.java | 8 ++-- .../settings/RestGetSettingsAction.java | 8 ++-- .../settings/RestUpdateSettingsAction.java | 8 ++-- .../shards/RestIndicesShardStoresAction.java | 8 ++-- .../indices/stats/RestIndicesStatsAction.java | 8 ++-- 
.../delete/RestDeleteIndexTemplateAction.java | 8 ++-- .../get/RestGetIndexTemplateAction.java | 8 ++-- .../head/RestHeadIndexTemplateAction.java | 8 ++-- .../put/RestPutIndexTemplateAction.java | 8 ++-- .../indices/upgrade/RestUpgradeAction.java | 13 +++--- .../query/RestValidateQueryAction.java | 8 ++-- .../rest/action/bulk/RestBulkAction.java | 8 ++-- .../rest/action/cat/AbstractCatAction.java | 10 ++--- .../rest/action/cat/RestAliasAction.java | 8 ++-- .../rest/action/cat/RestAllocationAction.java | 8 ++-- .../rest/action/cat/RestCatAction.java | 8 ++-- .../rest/action/cat/RestCountAction.java | 8 ++-- .../rest/action/cat/RestFielddataAction.java | 8 ++-- .../rest/action/cat/RestHealthAction.java | 8 ++-- .../rest/action/cat/RestIndicesAction.java | 8 ++-- .../rest/action/cat/RestMasterAction.java | 8 ++-- .../rest/action/cat/RestNodeAttrsAction.java | 8 ++-- .../rest/action/cat/RestNodesAction.java | 8 ++-- .../cat/RestPendingClusterTasksAction.java | 8 ++-- .../rest/action/cat/RestPluginsAction.java | 8 ++-- .../rest/action/cat/RestRecoveryAction.java | 8 ++-- .../action/cat/RestRepositoriesAction.java | 8 ++-- .../rest/action/cat/RestSegmentsAction.java | 8 ++-- .../rest/action/cat/RestShardsAction.java | 8 ++-- .../rest/action/cat/RestSnapshotAction.java | 8 ++-- .../rest/action/cat/RestTasksAction.java | 8 ++-- .../rest/action/cat/RestThreadPoolAction.java | 8 ++-- .../rest/action/count/RestCountAction.java | 8 ++-- .../rest/action/delete/RestDeleteAction.java | 8 ++-- .../action/explain/RestExplainAction.java | 8 ++-- .../fieldstats/RestFieldStatsAction.java | 8 ++-- .../rest/action/get/RestGetAction.java | 8 ++-- .../rest/action/get/RestGetSourceAction.java | 8 ++-- .../rest/action/get/RestHeadAction.java | 17 ++++--- .../rest/action/get/RestMultiGetAction.java | 8 ++-- .../rest/action/index/RestIndexAction.java | 16 +++---- .../ingest/RestDeletePipelineAction.java | 8 ++-- .../action/ingest/RestGetPipelineAction.java | 8 ++-- 
.../action/ingest/RestPutPipelineAction.java | 8 ++-- .../ingest/RestSimulatePipelineAction.java | 8 ++-- .../rest/action/main/RestMainAction.java | 8 ++-- .../action/search/RestClearScrollAction.java | 8 ++-- .../action/search/RestMultiSearchAction.java | 8 ++-- .../rest/action/search/RestSearchAction.java | 8 ++-- .../action/search/RestSearchScrollAction.java | 8 ++-- .../action/suggest/RestSuggestAction.java | 8 ++-- .../RestMultiTermVectorsAction.java | 8 ++-- .../termvectors/RestTermVectorsAction.java | 8 ++-- .../rest/action/update/RestUpdateAction.java | 8 ++-- .../action/ActionModuleTests.java | 3 +- .../common/network/NetworkModuleTests.java | 10 ++--- .../elasticsearch/http/HttpServerTests.java | 6 +-- .../TestResponseHeaderRestAction.java | 8 ++-- .../rest/RestControllerTests.java | 7 +-- .../rest/RestFilterChainTests.java | 35 +++++++-------- .../action/cat/RestRecoveryActionTests.java | 2 +- .../DedicatedClusterSnapshotRestoreIT.java | 6 ++- .../RestDeleteSearchTemplateAction.java | 6 +-- .../template/RestGetSearchTemplateAction.java | 6 +-- .../RestMultiSearchTemplateAction.java | 8 ++-- .../template/RestPutSearchTemplateAction.java | 6 +-- .../RestRenderSearchTemplateAction.java | 8 ++-- .../template/RestSearchTemplateAction.java | 8 ++-- .../percolator/RestMultiPercolateAction.java | 8 ++-- .../percolator/RestPercolateAction.java | 44 +++++++++---------- .../AbstractBaseReindexRestHandler.java | 10 ++--- .../AbstractBulkByQueryRestHandler.java | 4 +- .../reindex/RestDeleteByQueryAction.java | 8 ++-- .../index/reindex/RestReindexAction.java | 8 ++-- .../index/reindex/RestRethrottleAction.java | 8 ++-- .../reindex/RestUpdateByQueryAction.java | 8 ++-- .../plugin/example/ExampleCatAction.java | 8 ++-- 135 files changed, 592 insertions(+), 585 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/http/HttpServer.java b/core/src/main/java/org/elasticsearch/http/HttpServer.java index 31bbd235009..f44b839b2a3 100644 --- 
a/core/src/main/java/org/elasticsearch/http/HttpServer.java +++ b/core/src/main/java/org/elasticsearch/http/HttpServer.java @@ -19,6 +19,12 @@ package org.elasticsearch.http; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; @@ -39,11 +45,6 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.concurrent.atomic.AtomicBoolean; - import static org.elasticsearch.rest.RestStatus.FORBIDDEN; import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR; @@ -57,15 +58,18 @@ public class HttpServer extends AbstractLifecycleComponent implement private final NodeService nodeService; + private final NodeClient client; + private final CircuitBreakerService circuitBreakerService; @Inject public HttpServer(Settings settings, HttpServerTransport transport, RestController restController, NodeService nodeService, - CircuitBreakerService circuitBreakerService) { + NodeClient client, CircuitBreakerService circuitBreakerService) { super(settings); this.transport = transport; this.restController = restController; this.nodeService = nodeService; + this.client = client; this.circuitBreakerService = circuitBreakerService; nodeService.setHttpServer(this); transport.httpServerAdapter(this); @@ -115,7 +119,7 @@ public class HttpServer extends AbstractLifecycleComponent implement } // iff we could reserve bytes for the request we need to send the response also over this channel responseChannel = new ResourceHandlingHttpChannel(channel, circuitBreakerService, contentLength); - 
restController.dispatchRequest(request, responseChannel, threadContext); + restController.dispatchRequest(request, responseChannel, client, threadContext); } catch (Throwable t) { restController.sendErrorResponse(request, responseChannel, t); } diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 1d18f696163..e12a5f552ff 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -283,6 +283,7 @@ public class Node implements Closeable { modules.add(b -> { b.bind(PluginsService.class).toInstance(pluginsService); b.bind(Client.class).toInstance(client); + b.bind(NodeClient.class).toInstance(client); b.bind(Environment.class).toInstance(environment); b.bind(ThreadPool.class).toInstance(threadPool); b.bind(NodeEnvironment.class).toInstance(nodeEnvironment); diff --git a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index b406dfca545..cfa46eab514 100644 --- a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest; -import org.elasticsearch.client.Client; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; @@ -37,19 +36,10 @@ import org.elasticsearch.common.settings.Settings; public abstract class BaseRestHandler extends AbstractComponent implements RestHandler { public static final Setting MULTI_ALLOW_EXPLICIT_INDEX = Setting.boolSetting("rest.action.multi.allow_explicit_index", true, Property.NodeScope); - private final Client client; protected final ParseFieldMatcher parseFieldMatcher; - protected BaseRestHandler(Settings settings, Client client) { + protected BaseRestHandler(Settings settings) { super(settings); - 
this.client = client; this.parseFieldMatcher = new ParseFieldMatcher(settings); } - - @Override - public final void handleRequest(RestRequest request, RestChannel channel) throws Exception { - handleRequest(request, channel, client); - } - - protected abstract void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception; } diff --git a/core/src/main/java/org/elasticsearch/rest/RestController.java b/core/src/main/java/org/elasticsearch/rest/RestController.java index e8212ff09c6..8059dfd7ea5 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestController.java +++ b/core/src/main/java/org/elasticsearch/rest/RestController.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -150,7 +151,7 @@ public class RestController extends AbstractLifecycleComponent { return (handler != null) ? 
handler.canTripCircuitBreaker() : true; } - public void dispatchRequest(final RestRequest request, final RestChannel channel, ThreadContext threadContext) throws Exception { + public void dispatchRequest(final RestRequest request, final RestChannel channel, final NodeClient client, ThreadContext threadContext) throws Exception { if (!checkRequestParameters(request, channel)) { return; } @@ -162,10 +163,10 @@ public class RestController extends AbstractLifecycleComponent { } } if (filters.length == 0) { - executeHandler(request, channel); + executeHandler(request, channel, client); } else { ControllerFilterChain filterChain = new ControllerFilterChain(handlerFilter); - filterChain.continueProcessing(request, channel); + filterChain.continueProcessing(request, channel, client); } } } @@ -200,10 +201,10 @@ public class RestController extends AbstractLifecycleComponent { return true; } - void executeHandler(RestRequest request, RestChannel channel) throws Exception { + void executeHandler(RestRequest request, RestChannel channel, NodeClient client) throws Exception { final RestHandler handler = getHandler(request); if (handler != null) { - handler.handleRequest(request, channel); + handler.handleRequest(request, channel, client); } else { if (request.method() == RestRequest.Method.OPTIONS) { // when we have OPTIONS request, simply send OK by default (with the Access Control Origin header which gets automatically added) @@ -261,16 +262,16 @@ public class RestController extends AbstractLifecycleComponent { } @Override - public void continueProcessing(RestRequest request, RestChannel channel) { + public void continueProcessing(RestRequest request, RestChannel channel, NodeClient client) { try { int loc = index.getAndIncrement(); if (loc > filters.length) { throw new IllegalStateException("filter continueProcessing was called more than expected"); } else if (loc == filters.length) { - executionFilter.process(request, channel, this); + executionFilter.process(request, 
channel, client, this); } else { RestFilter preProcessor = filters[loc]; - preProcessor.process(request, channel, this); + preProcessor.process(request, channel, client, this); } } catch (Exception e) { try { @@ -285,8 +286,8 @@ public class RestController extends AbstractLifecycleComponent { class RestHandlerFilter extends RestFilter { @Override - public void process(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws Exception { - executeHandler(request, channel); + public void process(RestRequest request, RestChannel channel, NodeClient client, RestFilterChain filterChain) throws Exception { + executeHandler(request, channel, client); } } } diff --git a/core/src/main/java/org/elasticsearch/rest/RestFilter.java b/core/src/main/java/org/elasticsearch/rest/RestFilter.java index e3ff44ff1fc..276e99fc7e5 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestFilter.java +++ b/core/src/main/java/org/elasticsearch/rest/RestFilter.java @@ -21,6 +21,8 @@ package org.elasticsearch.rest; import java.io.Closeable; +import org.elasticsearch.client.node.NodeClient; + /** * A filter allowing to filter rest operations. */ @@ -43,5 +45,5 @@ public abstract class RestFilter implements Closeable { * Process the rest request. Using the channel to send a response, or the filter chain to continue * processing the request. 
*/ - public abstract void process(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws Exception; + public abstract void process(RestRequest request, RestChannel channel, NodeClient client, RestFilterChain filterChain) throws Exception; } diff --git a/core/src/main/java/org/elasticsearch/rest/RestFilterChain.java b/core/src/main/java/org/elasticsearch/rest/RestFilterChain.java index be14d252b74..239a6c6b1bb 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestFilterChain.java +++ b/core/src/main/java/org/elasticsearch/rest/RestFilterChain.java @@ -19,6 +19,8 @@ package org.elasticsearch.rest; +import org.elasticsearch.client.node.NodeClient; + /** * A filter chain allowing to continue and process the rest request. */ @@ -28,5 +30,5 @@ public interface RestFilterChain { * Continue processing the request. Should only be called if a response has not been sent * through the channel. */ - void continueProcessing(RestRequest request, RestChannel channel); + void continueProcessing(RestRequest request, RestChannel channel, NodeClient client); } diff --git a/core/src/main/java/org/elasticsearch/rest/RestHandler.java b/core/src/main/java/org/elasticsearch/rest/RestHandler.java index 31970441493..393e425baf9 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestHandler.java +++ b/core/src/main/java/org/elasticsearch/rest/RestHandler.java @@ -19,12 +19,21 @@ package org.elasticsearch.rest; +import org.elasticsearch.client.node.NodeClient; + /** * Handler for REST requests */ public interface RestHandler { - void handleRequest(RestRequest request, RestChannel channel) throws Exception; + /** + * Handles a rest request. 
+ * + * @param request The request to handle + * @param channel The channel to write the request response to + * @param client A client to use to make internal requests on behalf of the original request + */ + void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception; default boolean canTripCircuitBreaker() { return true; diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/allocation/RestClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/allocation/RestClusterAllocationExplainAction.java index 0785151e8d7..e802e9e2b7f 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/allocation/RestClusterAllocationExplainAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/allocation/RestClusterAllocationExplainAction.java @@ -19,11 +19,12 @@ package org.elasticsearch.rest.action.admin.cluster.allocation; +import java.io.IOException; + import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; @@ -42,22 +43,20 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.rest.action.support.RestBuilderListener; -import java.io.IOException; - /** * Class handling cluster allocation explanation at the REST level */ public class RestClusterAllocationExplainAction extends BaseRestHandler { @Inject - public RestClusterAllocationExplainAction(Settings settings, RestController controller, Client client) { - 
super(settings, client); + public RestClusterAllocationExplainAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.GET, "/_cluster/allocation/explain", this); controller.registerHandler(RestRequest.Method.POST, "/_cluster/allocation/explain", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { ClusterAllocationExplainRequest req; if (RestActions.hasBodyContent(request) == false) { // Empty request signals "explain the first unassigned shard you find" diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java index a2c6ffeaf14..d43d75ea5e7 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.health; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -42,15 +42,15 @@ import static org.elasticsearch.client.Requests.clusterHealthRequest; public class RestClusterHealthAction extends BaseRestHandler { @Inject - public RestClusterHealthAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestClusterHealthAction(Settings settings, RestController controller) { + 
super(settings); controller.registerHandler(RestRequest.Method.GET, "/_cluster/health", this); controller.registerHandler(RestRequest.Method.GET, "/_cluster/health/{index}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { ClusterHealthRequest clusterHealthRequest = clusterHealthRequest(Strings.splitStringByCommaToArray(request.param("index"))); clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); clusterHealthRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterHealthRequest.masterNodeTimeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java index 2030d5e7d92..1d0daa3e7d6 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/hotthreads/RestNodesHotThreadsAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.cluster.node.hotthreads; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -42,8 +42,8 @@ import org.elasticsearch.rest.action.support.RestResponseListener; public class RestNodesHotThreadsAction extends BaseRestHandler { @Inject - public 
RestNodesHotThreadsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestNodesHotThreadsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.GET, "/_cluster/nodes/hotthreads", this); controller.registerHandler(RestRequest.Method.GET, "/_cluster/nodes/hot_threads", this); controller.registerHandler(RestRequest.Method.GET, "/_cluster/nodes/{nodeId}/hotthreads", this); @@ -56,7 +56,7 @@ public class RestNodesHotThreadsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); NodesHotThreadsRequest nodesHotThreadsRequest = new NodesHotThreadsRequest(nodesIds); nodesHotThreadsRequest.threads(request.paramAsInt("threads", nodesHotThreadsRequest.threads())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java index 312fd0b0b7a..3df457efb9d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.cluster.node.info; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -46,8 +46,8 @@ public class RestNodesInfoAction extends BaseRestHandler { private final 
static Set ALLOWED_METRICS = Sets.newHashSet("http", "jvm", "os", "plugins", "process", "settings", "thread_pool", "transport", "ingest", "indices"); @Inject - public RestNodesInfoAction(Settings settings, RestController controller, Client client, SettingsFilter settingsFilter) { - super(settings, client); + public RestNodesInfoAction(Settings settings, RestController controller, SettingsFilter settingsFilter) { + super(settings); controller.registerHandler(GET, "/_nodes", this); // this endpoint is used for metrics, not for nodeIds, like /_nodes/fs controller.registerHandler(GET, "/_nodes/{nodeId}", this); @@ -59,7 +59,7 @@ public class RestNodesInfoAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String[] nodeIds; Set metrics; diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java index 8019de0b4e5..1b0e434ef5c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.cluster.node.stats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -43,8 +43,8 @@ import static 
org.elasticsearch.rest.RestRequest.Method.GET; public class RestNodesStatsAction extends BaseRestHandler { @Inject - public RestNodesStatsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestNodesStatsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_nodes/stats", this); controller.registerHandler(GET, "/_nodes/{nodeId}/stats", this); @@ -57,7 +57,7 @@ public class RestNodesStatsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); Set metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all")); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java index 0602abe651f..b9eec1ba98a 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.cluster.node.tasks; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -42,15 +42,15 @@ public class RestCancelTasksAction extends BaseRestHandler 
{ private final ClusterService clusterService; @Inject - public RestCancelTasksAction(Settings settings, RestController controller, Client client, ClusterService clusterService) { - super(settings, client); + public RestCancelTasksAction(Settings settings, RestController controller, ClusterService clusterService) { + super(settings); this.clusterService = clusterService; controller.registerHandler(POST, "/_tasks/_cancel", this); controller.registerHandler(POST, "/_tasks/{taskId}/_cancel", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); TaskId taskId = new TaskId(request.param("taskId")); String[] actions = Strings.splitStringByCommaToArray(request.param("actions")); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestGetTaskAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestGetTaskAction.java index e5617711014..501e96857fd 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestGetTaskAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestGetTaskAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.cluster.node.tasks; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -35,13 +35,13 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestGetTaskAction extends BaseRestHandler { @Inject - public RestGetTaskAction(Settings settings, RestController 
controller, Client client) { - super(settings, client); + public RestGetTaskAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_tasks/{taskId}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { TaskId taskId = new TaskId(request.param("taskId")); boolean waitForCompletion = request.paramAsBoolean("wait_for_completion", false); TimeValue timeout = request.paramAsTime("timeout", null); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java index 8fa13e808ac..5d1c617e8e3 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.cluster.node.tasks; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -42,8 +42,8 @@ public class RestListTasksAction extends BaseRestHandler { private final ClusterService clusterService; @Inject - public RestListTasksAction(Settings settings, RestController controller, Client client, ClusterService clusterService) { - super(settings, client); + public RestListTasksAction(Settings settings, RestController controller, ClusterService 
clusterService) { + super(settings); this.clusterService = clusterService; controller.registerHandler(GET, "/_tasks", this); } @@ -67,7 +67,7 @@ public class RestListTasksAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { ActionListener listener = nodeSettingListener(clusterService, new RestToXContentListener<>(channel)); client.admin().cluster().listTasks(generateListTasksRequest(request), listener); } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java index 136c1cfae3f..c2dce1cae36 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.repositories.delete; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -39,13 +39,13 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE; public class RestDeleteRepositoryAction extends BaseRestHandler { @Inject - public RestDeleteRepositoryAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestDeleteRepositoryAction(Settings settings, 
RestController controller) { + super(settings); controller.registerHandler(DELETE, "/_snapshot/{repository}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { DeleteRepositoryRequest deleteRepositoryRequest = deleteRepositoryRequest(request.param("repository")); deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); deleteRepositoryRequest.timeout(request.paramAsTime("timeout", deleteRepositoryRequest.timeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java index 09422481cf3..34004156d5b 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.repositories.get; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.RepositoriesMetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; @@ -49,15 +49,15 @@ public class RestGetRepositoriesAction extends BaseRestHandler { private final SettingsFilter settingsFilter; @Inject - public RestGetRepositoriesAction(Settings settings, RestController controller, Client client, SettingsFilter settingsFilter) { - super(settings, client); + 
public RestGetRepositoriesAction(Settings settings, RestController controller, SettingsFilter settingsFilter) { + super(settings); controller.registerHandler(GET, "/_snapshot", this); controller.registerHandler(GET, "/_snapshot/{repository}", this); this.settingsFilter = settingsFilter; } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final String[] repositories = request.paramAsStringArray("repository", Strings.EMPTY_ARRAY); GetRepositoriesRequest getRepositoriesRequest = getRepositoryRequest(repositories); getRepositoriesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRepositoriesRequest.masterNodeTimeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java index 878eb2915bc..745dd7e0b9f 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.repositories.put; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -40,15 +40,15 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; public class RestPutRepositoryAction extends BaseRestHandler { @Inject - public 
RestPutRepositoryAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestPutRepositoryAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(PUT, "/_snapshot/{repository}", this); controller.registerHandler(POST, "/_snapshot/{repository}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { PutRepositoryRequest putRepositoryRequest = putRepositoryRequest(request.param("repository")); putRepositoryRequest.source(request.content().toUtf8()); putRepositoryRequest.verify(request.paramAsBoolean("verify", true)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java index 306dcbb21b9..b3f6fe776fe 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.cluster.repositories.verify; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -35,13 +35,13 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestVerifyRepositoryAction extends BaseRestHandler { @Inject - public RestVerifyRepositoryAction(Settings settings, RestController controller, Client client) { - 
super(settings, client); + public RestVerifyRepositoryAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/_snapshot/{repository}/_verify", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { VerifyRepositoryRequest verifyRepositoryRequest = verifyRepositoryRequest(request.param("repository")); verifyRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", verifyRepositoryRequest.masterNodeTimeout())); verifyRepositoryRequest.timeout(request.paramAsTime("timeout", verifyRepositoryRequest.timeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java index a06466137a5..ad4abaf10c4 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.reroute; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommandRegistry; @@ -65,16 +65,16 @@ public class RestClusterRerouteAction extends BaseRestHandler { private final AllocationCommandRegistry registry; @Inject - public RestClusterRerouteAction(Settings settings, RestController controller, Client client, SettingsFilter settingsFilter, + 
public RestClusterRerouteAction(Settings settings, RestController controller, SettingsFilter settingsFilter, AllocationCommandRegistry registry) { - super(settings, client); + super(settings); this.settingsFilter = settingsFilter; this.registry = registry; controller.registerHandler(RestRequest.Method.POST, "/_cluster/reroute", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { ClusterRerouteRequest clusterRerouteRequest = createRequest(request, registry, parseFieldMatcher); client.admin().cluster().reroute(clusterRerouteRequest, new AcknowledgedRestListener(channel) { @Override diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java index 44a7f2f714b..a1e85c5d298 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.settings; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.inject.Inject; @@ -50,15 +50,15 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { private final SettingsFilter settingsFilter; @Inject - public RestClusterGetSettingsAction(Settings settings, RestController controller, Client client, 
ClusterSettings clusterSettings, SettingsFilter settingsFilter) { - super(settings, client); + public RestClusterGetSettingsAction(Settings settings, RestController controller, ClusterSettings clusterSettings, SettingsFilter settingsFilter) { + super(settings); this.clusterSettings = clusterSettings; controller.registerHandler(RestRequest.Method.GET, "/_cluster/settings", this); this.settingsFilter = settingsFilter; } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest() .routingTable(false) .nodes(false); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java index b25866d4520..055bcc4df60 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.settings; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.Requests; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -42,13 +42,13 @@ import java.util.Map; public class RestClusterUpdateSettingsAction extends BaseRestHandler { @Inject - public RestClusterUpdateSettingsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public 
RestClusterUpdateSettingsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.PUT, "/_cluster/settings", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = Requests.clusterUpdateSettingsRequest(); clusterUpdateSettingsRequest.timeout(request.paramAsTime("timeout", clusterUpdateSettingsRequest.timeout())); clusterUpdateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterUpdateSettingsRequest.masterNodeTimeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java index 860e110b2d6..82edb74c7e6 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.cluster.shards; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.Requests; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -41,8 +41,8 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestClusterSearchShardsAction extends BaseRestHandler { @Inject - public 
RestClusterSearchShardsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestClusterSearchShardsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_search_shards", this); controller.registerHandler(POST, "/_search_shards", this); controller.registerHandler(GET, "/{index}/_search_shards", this); @@ -52,7 +52,7 @@ public class RestClusterSearchShardsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final ClusterSearchShardsRequest clusterSearchShardsRequest = Requests.clusterSearchShardsRequest(indices); clusterSearchShardsRequest.local(request.paramAsBoolean("local", clusterSearchShardsRequest.local())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java index 9d6be664d48..3be27d286e2 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.snapshots.create; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.rest.BaseRestHandler; @@ -40,14 +40,14 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; public class RestCreateSnapshotAction extends BaseRestHandler { @Inject - public RestCreateSnapshotAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestCreateSnapshotAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(PUT, "/_snapshot/{repository}/{snapshot}", this); controller.registerHandler(POST, "/_snapshot/{repository}/{snapshot}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { CreateSnapshotRequest createSnapshotRequest = createSnapshotRequest(request.param("repository"), request.param("snapshot")); createSnapshotRequest.source(request.content().toUtf8()); createSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createSnapshotRequest.masterNodeTimeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/delete/RestDeleteSnapshotAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/delete/RestDeleteSnapshotAction.java index 38c78bd5d88..5b99cb3a5ee 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/delete/RestDeleteSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/delete/RestDeleteSnapshotAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.snapshots.delete; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -39,13 +39,13 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE; public class RestDeleteSnapshotAction extends BaseRestHandler { @Inject - public RestDeleteSnapshotAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestDeleteSnapshotAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(DELETE, "/_snapshot/{repository}/{snapshot}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { DeleteSnapshotRequest deleteSnapshotRequest = deleteSnapshotRequest(request.param("repository"), request.param("snapshot")); deleteSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteSnapshotRequest.masterNodeTimeout())); client.admin().cluster().deleteSnapshot(deleteSnapshotRequest, new AcknowledgedRestListener(channel)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/get/RestGetSnapshotsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/get/RestGetSnapshotsAction.java index 1151fed8f23..835c23ac191 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/get/RestGetSnapshotsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/get/RestGetSnapshotsAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.snapshots.get; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; 
import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -40,14 +40,14 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestGetSnapshotsAction extends BaseRestHandler { @Inject - public RestGetSnapshotsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestGetSnapshotsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_snapshot/{repository}/{snapshot}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String repository = request.param("repository"); String[] snapshots = request.paramAsStringArray("snapshot", Strings.EMPTY_ARRAY); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/restore/RestRestoreSnapshotAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/restore/RestRestoreSnapshotAction.java index e2a16bd4b46..6a1eb0eaa50 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/restore/RestRestoreSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/restore/RestRestoreSnapshotAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.snapshots.restore; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -39,13 +39,13 @@ import static 
org.elasticsearch.rest.RestRequest.Method.POST; public class RestRestoreSnapshotAction extends BaseRestHandler { @Inject - public RestRestoreSnapshotAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestRestoreSnapshotAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/_snapshot/{repository}/{snapshot}/_restore", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { RestoreSnapshotRequest restoreSnapshotRequest = restoreSnapshotRequest(request.param("repository"), request.param("snapshot")); restoreSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", restoreSnapshotRequest.masterNodeTimeout())); restoreSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/status/RestSnapshotsStatusAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/status/RestSnapshotsStatusAction.java index 2e8810e2ba7..484ee4e5120 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/status/RestSnapshotsStatusAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/status/RestSnapshotsStatusAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.snapshots.status; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.settings.Settings; @@ -40,15 +40,15 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestSnapshotsStatusAction extends BaseRestHandler { @Inject - public RestSnapshotsStatusAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestSnapshotsStatusAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_snapshot/{repository}/{snapshot}/_status", this); controller.registerHandler(GET, "/_snapshot/{repository}/_status", this); controller.registerHandler(GET, "/_snapshot/_status", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String repository = request.param("repository", "_all"); String[] snapshots = request.paramAsStringArray("snapshot", Strings.EMPTY_ARRAY); if (snapshots.length == 1 && "_all".equalsIgnoreCase(snapshots[0])) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java index c756796446e..b3f5a407d91 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.cluster.state; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.common.Strings; @@ -50,8 +50,8 @@ public class RestClusterStateAction extends BaseRestHandler { private final SettingsFilter settingsFilter; @Inject - public RestClusterStateAction(Settings settings, RestController controller, Client client, SettingsFilter settingsFilter) { - super(settings, client); + public RestClusterStateAction(Settings settings, RestController controller, SettingsFilter settingsFilter) { + super(settings); controller.registerHandler(RestRequest.Method.GET, "/_cluster/state", this); controller.registerHandler(RestRequest.Method.GET, "/_cluster/state/{metric}", this); controller.registerHandler(RestRequest.Method.GET, "/_cluster/state/{metric}/{indices}", this); @@ -60,7 +60,7 @@ public class RestClusterStateAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest(); clusterStateRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterStateRequest.indicesOptions())); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java index c83a5b7ae1b..a43b428ae9c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.cluster.stats; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -35,14 +35,14 @@ import org.elasticsearch.rest.action.support.RestActions.NodesResponseRestListen public class RestClusterStatsAction extends BaseRestHandler { @Inject - public RestClusterStatsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestClusterStatsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.GET, "/_cluster/stats", this); controller.registerHandler(RestRequest.Method.GET, "/_cluster/stats/nodes/{nodeId}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { ClusterStatsRequest clusterStatsRequest = new ClusterStatsRequest().nodesIds(request.paramAsStringArray("nodeId", null)); clusterStatsRequest.timeout(request.param("timeout")); client.admin().cluster().clusterStats(clusterStatsRequest, new NodesResponseRestListener<>(channel)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestDeleteStoredScriptAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestDeleteStoredScriptAction.java index a8190d30278..6416a693550 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestDeleteStoredScriptAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestDeleteStoredScriptAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest.action.admin.cluster.storedscripts; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -33,12 +33,12 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE; public class RestDeleteStoredScriptAction extends BaseRestHandler { @Inject - public RestDeleteStoredScriptAction(Settings settings, RestController controller, Client client) { - this(settings, controller, true, client); + public RestDeleteStoredScriptAction(Settings settings, RestController controller) { + this(settings, controller, true); } - protected RestDeleteStoredScriptAction(Settings settings, RestController controller, boolean registerDefaultHandlers, Client client) { - super(settings, client); + protected RestDeleteStoredScriptAction(Settings settings, RestController controller, boolean registerDefaultHandlers) { + super(settings); if (registerDefaultHandlers) { controller.registerHandler(DELETE, "/_scripts/{lang}/{id}", this); } @@ -49,7 +49,7 @@ public class RestDeleteStoredScriptAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, NodeClient client) { DeleteStoredScriptRequest deleteStoredScriptRequest = new DeleteStoredScriptRequest(getScriptLang(request), request.param("id")); client.admin().cluster().deleteStoredScript(deleteStoredScriptRequest, new AcknowledgedRestListener<>(channel)); } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestGetStoredScriptAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestGetStoredScriptAction.java index bf286f623fb..fb6fd0ca7c9 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestGetStoredScriptAction.java +++ 
b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestGetStoredScriptAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.cluster.storedscripts; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -38,12 +38,12 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestGetStoredScriptAction extends BaseRestHandler { @Inject - public RestGetStoredScriptAction(Settings settings, RestController controller, Client client) { - this(settings, controller, true, client); + public RestGetStoredScriptAction(Settings settings, RestController controller) { + this(settings, controller, true); } - protected RestGetStoredScriptAction(Settings settings, RestController controller, boolean registerDefaultHandlers, Client client) { - super(settings, client); + protected RestGetStoredScriptAction(Settings settings, RestController controller, boolean registerDefaultHandlers) { + super(settings); if (registerDefaultHandlers) { controller.registerHandler(GET, "/_scripts/{lang}/{id}", this); } @@ -58,7 +58,7 @@ public class RestGetStoredScriptAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, NodeClient client) { final GetStoredScriptRequest getRequest = new GetStoredScriptRequest(getScriptLang(request), request.param("id")); client.admin().cluster().getStoredScript(getRequest, new RestBuilderListener(channel) { @Override diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestPutStoredScriptAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestPutStoredScriptAction.java index 0ecb27b8d1e..bb62528fc17 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestPutStoredScriptAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/storedscripts/RestPutStoredScriptAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest.action.admin.cluster.storedscripts; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -34,12 +34,12 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; public class RestPutStoredScriptAction extends BaseRestHandler { @Inject - public RestPutStoredScriptAction(Settings settings, RestController controller, Client client) { - this(settings, controller, true, client); + public RestPutStoredScriptAction(Settings settings, RestController controller) { + this(settings, controller, true); } - protected RestPutStoredScriptAction(Settings settings, RestController controller, boolean registerDefaultHandlers, Client client) { - super(settings, client); + protected RestPutStoredScriptAction(Settings settings, RestController controller, boolean registerDefaultHandlers) { + super(settings); if (registerDefaultHandlers) { controller.registerHandler(POST, "/_scripts/{lang}/{id}", this); controller.registerHandler(PUT, "/_scripts/{lang}/{id}", this); @@ -51,7 +51,7 @@ public class RestPutStoredScriptAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, Client client) { + public void handleRequest(final 
RestRequest request, final RestChannel channel, NodeClient client) { PutStoredScriptRequest putRequest = new PutStoredScriptRequest(getScriptLang(request), request.param("id")); putRequest.script(request.content()); client.admin().cluster().putStoredScript(putRequest, new AcknowledgedRestListener<>(channel)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/tasks/RestPendingClusterTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/tasks/RestPendingClusterTasksAction.java index 333b6d64491..e106dd1f980 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/tasks/RestPendingClusterTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/tasks/RestPendingClusterTasksAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster.tasks; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -35,13 +35,13 @@ import org.elasticsearch.rest.action.support.RestToXContentListener; public class RestPendingClusterTasksAction extends BaseRestHandler { @Inject - public RestPendingClusterTasksAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestPendingClusterTasksAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.GET, "/_cluster/pending_tasks", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { 
PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout())); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java index 7eecaaa738c..a922edff484 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -35,15 +35,15 @@ import org.elasticsearch.rest.action.support.RestToXContentListener; public class RestRolloverIndexAction extends BaseRestHandler { @Inject - public RestRolloverIndexAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestRolloverIndexAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.POST, "/{index}/_rollover", this); controller.registerHandler(RestRequest.Method.POST, "/{index}/_rollover/{new_index}", this); } @SuppressWarnings({"unchecked"}) @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { RolloverRequest rolloverIndexRequest = new 
RolloverRequest(request.param("index"), request.param("new_index")); if (request.hasContent()) { rolloverIndexRequest.source(request.content()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java index f145bcbf02a..96b4abbe063 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.shrink.ShrinkRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -35,15 +35,15 @@ import org.elasticsearch.rest.action.support.AcknowledgedRestListener; public class RestShrinkIndexAction extends BaseRestHandler { @Inject - public RestShrinkIndexAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestShrinkIndexAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.PUT, "/{index}/_shrink/{target}", this); controller.registerHandler(RestRequest.Method.POST, "/{index}/_shrink/{target}", this); } @SuppressWarnings({"unchecked"}) @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { if (request.param("target") == null) { throw new IllegalArgumentException("no target index"); } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java 
b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java index 74982133f71..dff4c492f17 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -46,13 +46,13 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestIndicesAliasesAction extends BaseRestHandler { @Inject - public RestIndicesAliasesAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestIndicesAliasesAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/_aliases", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); try (XContentParser parser = XContentFactory.xContent(request.content()).createParser(request.content())) { diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/RestIndexDeleteAliasesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/RestIndexDeleteAliasesAction.java index 7fcaadc3d8b..eedac4d8eea 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/RestIndexDeleteAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/delete/RestIndexDeleteAliasesAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.indices.alias.delete; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -37,14 +37,14 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE; public class RestIndexDeleteAliasesAction extends BaseRestHandler { @Inject - public RestIndexDeleteAliasesAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestIndexDeleteAliasesAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(DELETE, "/{index}/_alias/{name}", this); controller.registerHandler(DELETE, "/{index}/_aliases/{name}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final String[] aliases = Strings.splitStringByCommaToArray(request.param("name")); IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetAliasesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetAliasesAction.java index cbebf08516d..8ecbc64b27f 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetAliasesAction.java @@ -23,7 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -50,14 +50,14 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestGetAliasesAction extends BaseRestHandler { @Inject - public RestGetAliasesAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestGetAliasesAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_alias/{name}", this); controller.registerHandler(GET, "/{index}/_alias/{name}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final String[] aliases = request.paramAsStringArrayOrEmptyIfAll("name"); final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases); diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/head/RestAliasesExistAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/head/RestAliasesExistAction.java index 65f8fd363ea..7c539e1be48 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/head/RestAliasesExistAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/head/RestAliasesExistAction.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.inject.Inject; @@ -44,15 +44,15 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestAliasesExistAction extends BaseRestHandler { @Inject - public RestAliasesExistAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestAliasesExistAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(HEAD, "/_alias/{name}", this); controller.registerHandler(HEAD, "/{index}/_alias/{name}", this); controller.registerHandler(HEAD, "/{index}/_alias", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String[] aliases = request.paramAsStringArray("name", Strings.EMPTY_ARRAY); final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases); diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java index 7a0c2ad466f..e9f5e294db4 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.indices.alias.put; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -44,8 +44,8 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; public class RestIndexPutAliasAction extends BaseRestHandler { @Inject - public RestIndexPutAliasAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestIndexPutAliasAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(PUT, "/{index}/_alias/{name}", this); controller.registerHandler(PUT, "/_alias/{name}", this); controller.registerHandler(PUT, "/{index}/_aliases/{name}", this); @@ -62,7 +62,7 @@ public class RestIndexPutAliasAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); 
String alias = request.param("name"); Map filter = null; diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java index 75caea19856..588771ae1c8 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.indices.analyze; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesReference; @@ -60,8 +60,8 @@ public class RestAnalyzeAction extends BaseRestHandler { } @Inject - public RestAnalyzeAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestAnalyzeAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_analyze", this); controller.registerHandler(GET, "/{index}/_analyze", this); controller.registerHandler(POST, "/_analyze", this); @@ -69,7 +69,7 @@ public class RestAnalyzeAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String[] texts = request.paramAsStringArrayOrEmptyIfAll("text"); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java 
b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java index 97e424445d8..454e18cb2d7 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.cache.clear; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; @@ -50,8 +50,8 @@ import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastSh public class RestClearIndicesCacheAction extends BaseRestHandler { @Inject - public RestClearIndicesCacheAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestClearIndicesCacheAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/_cache/clear", this); controller.registerHandler(POST, "/{index}/_cache/clear", this); @@ -60,7 +60,7 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest(Strings.splitStringByCommaToArray(request.param("index"))); clearIndicesCacheRequest.indicesOptions(IndicesOptions.fromRequest(request, 
clearIndicesCacheRequest.indicesOptions())); fromRequest(request, clearIndicesCacheRequest, parseFieldMatcher); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java index 5f211b88d11..1022310a350 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.close; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -38,14 +38,14 @@ import org.elasticsearch.rest.action.support.AcknowledgedRestListener; public class RestCloseIndexAction extends BaseRestHandler { @Inject - public RestCloseIndexAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestCloseIndexAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.POST, "/_close", this); controller.registerHandler(RestRequest.Method.POST, "/{index}/_close", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); 
closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout())); closeIndexRequest.timeout(request.paramAsTime("timeout", closeIndexRequest.timeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java index 46bc9388972..46ee596286e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.indices.create; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -36,15 +36,15 @@ import org.elasticsearch.rest.action.support.AcknowledgedRestListener; public class RestCreateIndexAction extends BaseRestHandler { @Inject - public RestCreateIndexAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestCreateIndexAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.PUT, "/{index}", this); controller.registerHandler(RestRequest.Method.POST, "/{index}", this); } @SuppressWarnings({"unchecked"}) @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { CreateIndexRequest createIndexRequest = new CreateIndexRequest(request.param("index")); if 
(request.hasContent()) { createIndexRequest.source(request.content()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java index 4953842c54a..75564021a3b 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.delete; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -38,14 +38,14 @@ import org.elasticsearch.rest.action.support.AcknowledgedRestListener; public class RestDeleteIndexAction extends BaseRestHandler { @Inject - public RestDeleteIndexAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestDeleteIndexAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.DELETE, "/", this); controller.registerHandler(RestRequest.Method.DELETE, "/{index}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); deleteIndexRequest.timeout(request.paramAsTime("timeout", deleteIndexRequest.timeout())); 
deleteIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexRequest.masterNodeTimeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java index 0e240352a73..f45f607356e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.exists.indices; import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest; import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.inject.Inject; @@ -45,13 +45,13 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestIndicesExistsAction extends BaseRestHandler { @Inject - public RestIndicesExistsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestIndicesExistsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(HEAD, "/{index}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { IndicesExistsRequest indicesExistsRequest = new IndicesExistsRequest(Strings.splitStringByCommaToArray(request.param("index"))); indicesExistsRequest.indicesOptions(IndicesOptions.fromRequest(request, 
indicesExistsRequest.indicesOptions())); indicesExistsRequest.local(request.paramAsBoolean("local", indicesExistsRequest.local())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java index 7a55be7e3af..2ac0d1354f1 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.indices.exists.types; import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequest; import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.inject.Inject; @@ -44,13 +44,13 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestTypesExistsAction extends BaseRestHandler { @Inject - public RestTypesExistsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestTypesExistsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(HEAD, "/{index}/{type}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { TypesExistsRequest typesExistsRequest = new TypesExistsRequest( Strings.splitStringByCommaToArray(request.param("index")), Strings.splitStringByCommaToArray(request.param("type")) ); diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java index f3b3304bcf9..5156a95c0cc 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.flush; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -46,8 +46,8 @@ import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastSh public class RestFlushAction extends BaseRestHandler { @Inject - public RestFlushAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestFlushAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/_flush", this); controller.registerHandler(POST, "/{index}/_flush", this); @@ -56,7 +56,7 @@ public class RestFlushAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions())); flushRequest.force(request.paramAsBoolean("force", flushRequest.force())); diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java index 9bb36f03d65..70d311c6e04 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.flush; import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -44,8 +44,8 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestSyncedFlushAction extends BaseRestHandler { @Inject - public RestSyncedFlushAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestSyncedFlushAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/_flush/synced", this); controller.registerHandler(POST, "/{index}/_flush/synced", this); @@ -54,7 +54,7 @@ public class RestSyncedFlushAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen()); SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); 
syncedFlushRequest.indicesOptions(indicesOptions); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/forcemerge/RestForceMergeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/forcemerge/RestForceMergeAction.java index 8aa2683be5e..ecdbdeae14c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/forcemerge/RestForceMergeAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/forcemerge/RestForceMergeAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.forcemerge; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -45,14 +45,14 @@ import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastSh public class RestForceMergeAction extends BaseRestHandler { @Inject - public RestForceMergeAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestForceMergeAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/_forcemerge", this); controller.registerHandler(POST, "/{index}/_forcemerge", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { ForceMergeRequest mergeRequest = new ForceMergeRequest(Strings.splitStringByCommaToArray(request.param("index"))); mergeRequest.indicesOptions(IndicesOptions.fromRequest(request, mergeRequest.indicesOptions())); 
mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java index f31c17d8b6e..578cd2e4a62 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.Strings; @@ -57,8 +57,8 @@ public class RestGetIndicesAction extends BaseRestHandler { private final SettingsFilter settingsFilter; @Inject - public RestGetIndicesAction(Settings settings, RestController controller, Client client, IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter) { - super(settings, client); + public RestGetIndicesAction(Settings settings, RestController controller, IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter) { + super(settings); this.indexScopedSettings = indexScopedSettings; controller.registerHandler(GET, "/{index}", this); controller.registerHandler(GET, "/{index}/{type}", this); @@ -66,7 +66,7 @@ public class RestGetIndicesAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, 
final NodeClient client) { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); String[] featureParams = request.paramAsStringArray("type", null); // Work out if the indices is a list of features diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetFieldMappingAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetFieldMappingAction.java index 0db931d0a7a..d2f77e95f52 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetFieldMappingAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetFieldMappingAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsReques import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -50,8 +50,8 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestGetFieldMappingAction extends BaseRestHandler { @Inject - public RestGetFieldMappingAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestGetFieldMappingAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_mapping/field/{fields}", this); controller.registerHandler(GET, "/_mapping/{type}/field/{fields}", this); controller.registerHandler(GET, "/{index}/_mapping/field/{fields}", this); @@ -60,7 +60,7 @@ public class RestGetFieldMappingAction extends BaseRestHandler { } @Override - public void handleRequest(final 
RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final String[] types = request.paramAsStringArrayOrEmptyIfAll("type"); final String[] fields = Strings.splitStringByCommaToArray(request.param("fields")); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java index 0da54bc4e75..c4f9dbf2333 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/get/RestGetMappingAction.java @@ -23,7 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -50,8 +50,8 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestGetMappingAction extends BaseRestHandler { @Inject - public RestGetMappingAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestGetMappingAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/{index}/{type}/_mapping", this); controller.registerHandler(GET, "/{index}/_mappings/{type}", this); controller.registerHandler(GET, "/{index}/_mapping/{type}", this); @@ -59,7 +59,7 @@ 
public class RestGetMappingAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final String[] types = request.paramAsStringArrayOrEmptyIfAll("type"); GetMappingsRequest getMappingsRequest = new GetMappingsRequest(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java index fdb16d2fb8f..cb2493f4e33 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.mapping.put; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -43,8 +43,8 @@ public class RestPutMappingAction extends BaseRestHandler { @Inject - public RestPutMappingAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestPutMappingAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(PUT, "/{index}/_mapping/", this); controller.registerHandler(PUT, "/{index}/{type}/_mapping", this); controller.registerHandler(PUT, "/{index}/_mapping/{type}", this); 
@@ -68,7 +68,7 @@ public class RestPutMappingAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { PutMappingRequest putMappingRequest = putMappingRequest(Strings.splitStringByCommaToArray(request.param("index"))); putMappingRequest.type(request.param("type")); putMappingRequest.source(request.content().toUtf8()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java index 58bda9d3a3d..9f27d8e3666 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.open; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -38,14 +38,14 @@ import org.elasticsearch.rest.action.support.AcknowledgedRestListener; public class RestOpenIndexAction extends BaseRestHandler { @Inject - public RestOpenIndexAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestOpenIndexAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.POST, "/_open", this); controller.registerHandler(RestRequest.Method.POST, "/{index}/_open", this); } @Override - public 
void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { OpenIndexRequest openIndexRequest = new OpenIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); openIndexRequest.timeout(request.paramAsTime("timeout", openIndexRequest.timeout())); openIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", openIndexRequest.masterNodeTimeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java index 88bc9fb8c9f..6178d2559b5 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.recovery; import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -44,14 +44,14 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestRecoveryAction extends BaseRestHandler { @Inject - public RestRecoveryAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestRecoveryAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_recovery", this); controller.registerHandler(GET, "/{index}/_recovery", this); } @Override - public void handleRequest(final 
RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final RecoveryRequest recoveryRequest = new RecoveryRequest(Strings.splitStringByCommaToArray(request.param("index"))); recoveryRequest.detailed(request.paramAsBoolean("detailed", false)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java index fcc6d240b34..6e2fcec3f51 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.refresh; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -46,8 +46,8 @@ import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastSh public class RestRefreshAction extends BaseRestHandler { @Inject - public RestRefreshAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestRefreshAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/_refresh", this); controller.registerHandler(POST, "/{index}/_refresh", this); @@ -56,7 +56,7 @@ public class RestRefreshAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + 
public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { RefreshRequest refreshRequest = new RefreshRequest(Strings.splitStringByCommaToArray(request.param("index"))); refreshRequest.indicesOptions(IndicesOptions.fromRequest(request, refreshRequest.indicesOptions())); client.admin().indices().refresh(refreshRequest, new RestBuilderListener(channel) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java index da76a769ce4..a1d5560e674 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.segments; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -44,14 +44,14 @@ import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastSh public class RestIndicesSegmentsAction extends BaseRestHandler { @Inject - public RestIndicesSegmentsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestIndicesSegmentsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_segments", this); controller.registerHandler(GET, "/{index}/_segments", this); } @Override - public void handleRequest(final RestRequest request, final 
RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { IndicesSegmentsRequest indicesSegmentsRequest = new IndicesSegmentsRequest(Strings.splitStringByCommaToArray(request.param("index"))); indicesSegmentsRequest.verbose(request.paramAsBoolean("verbose", false)); indicesSegmentsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesSegmentsRequest.indicesOptions())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestGetSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestGetSettingsAction.java index 8c24e2c68e3..46a101161b6 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestGetSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestGetSettingsAction.java @@ -23,7 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -47,8 +47,8 @@ public class RestGetSettingsAction extends BaseRestHandler { private final SettingsFilter settingsFilter; @Inject - public RestGetSettingsAction(Settings settings, RestController controller, Client client, IndexScopedSettings indexScopedSettings, final SettingsFilter settingsFilter) { - super(settings, client); + public RestGetSettingsAction(Settings settings, RestController controller, IndexScopedSettings indexScopedSettings, final SettingsFilter settingsFilter) { + super(settings); 
this.indexScopedSettings = indexScopedSettings; controller.registerHandler(GET, "/{index}/_settings/{name}", this); controller.registerHandler(GET, "/_settings/{name}", this); @@ -57,7 +57,7 @@ public class RestGetSettingsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final String[] names = request.paramAsStringArrayOrEmptyIfAll("name"); final boolean renderDefaults = request.paramAsBoolean("include_defaults", false); GetSettingsRequest getSettingsRequest = new GetSettingsRequest() diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java index 5bd102594d7..70608930917 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.indices.settings; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -53,14 +53,14 @@ public class RestUpdateSettingsAction extends BaseRestHandler { "allow_no_indices")); @Inject - public RestUpdateSettingsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestUpdateSettingsAction(Settings settings, RestController controller) { + super(settings); 
controller.registerHandler(RestRequest.Method.PUT, "/{index}/_settings", this); controller.registerHandler(RestRequest.Method.PUT, "/_settings", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { UpdateSettingsRequest updateSettingsRequest = updateSettingsRequest(Strings.splitStringByCommaToArray(request.param("index"))); updateSettingsRequest.timeout(request.paramAsTime("timeout", updateSettingsRequest.timeout())); updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/shards/RestIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/shards/RestIndicesShardStoresAction.java index 586599c1a1e..8a0d61c933c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/shards/RestIndicesShardStoresAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/shards/RestIndicesShardStoresAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -45,14 +45,14 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestIndicesShardStoresAction extends BaseRestHandler { @Inject - public RestIndicesShardStoresAction(Settings settings, RestController controller, Client 
client) { - super(settings, client); + public RestIndicesShardStoresAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_shard_stores", this); controller.registerHandler(GET, "/{index}/_shard_stores", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { IndicesShardStoresRequest indicesShardStoresRequest = new IndicesShardStoresRequest(Strings.splitStringByCommaToArray(request.param("index"))); if (request.hasParam("status")) { indicesShardStoresRequest.shardStatuses(Strings.splitStringByCommaToArray(request.param("status"))); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java index 623e60cb4b9..93a1dea9b5e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.admin.indices.stats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -46,8 +46,8 @@ import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastSh public class RestIndicesStatsAction extends BaseRestHandler { @Inject - public RestIndicesStatsAction(Settings settings, RestController controller, Client client) { - 
super(settings, client); + public RestIndicesStatsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_stats", this); controller.registerHandler(GET, "/_stats/{metric}", this); controller.registerHandler(GET, "/_stats/{metric}/{indexMetric}", this); @@ -56,7 +56,7 @@ public class RestIndicesStatsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); indicesStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesStatsRequest.indicesOptions())); indicesStatsRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java index a59ab9ac704..00a029c34b9 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.indices.template.delete; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -35,13 +35,13 @@ import org.elasticsearch.rest.action.support.AcknowledgedRestListener; 
public class RestDeleteIndexTemplateAction extends BaseRestHandler { @Inject - public RestDeleteIndexTemplateAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestDeleteIndexTemplateAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.DELETE, "/_template/{name}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { DeleteIndexTemplateRequest deleteIndexTemplateRequest = new DeleteIndexTemplateRequest(request.param("name")); deleteIndexTemplateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexTemplateRequest.masterNodeTimeout())); client.admin().indices().deleteTemplate(deleteIndexTemplateRequest, new AcknowledgedRestListener(channel)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java index d62d97400c5..de4a0f00442 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.indices.template.get; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -49,15 +49,15 @@ import 
static org.elasticsearch.rest.RestStatus.OK; public class RestGetIndexTemplateAction extends BaseRestHandler { @Inject - public RestGetIndexTemplateAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestGetIndexTemplateAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_template", this); controller.registerHandler(GET, "/_template/{name}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final String[] names = Strings.splitStringByCommaToArray(request.param("name")); GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(names); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/head/RestHeadIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/head/RestHeadIndexTemplateAction.java index 2a40de984b9..b790782a01a 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/head/RestHeadIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/head/RestHeadIndexTemplateAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.indices.template.head; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -42,14 +42,14 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestHeadIndexTemplateAction extends BaseRestHandler { 
@Inject - public RestHeadIndexTemplateAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestHeadIndexTemplateAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(HEAD, "/_template/{name}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(request.param("name")); getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local())); getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java index 0b08b64e89b..77f91cebdf7 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.admin.indices.template.put; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -35,15 +35,15 @@ import org.elasticsearch.rest.action.support.AcknowledgedRestListener; public class RestPutIndexTemplateAction extends BaseRestHandler 
{ @Inject - public RestPutIndexTemplateAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestPutIndexTemplateAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.PUT, "/_template/{name}", this); controller.registerHandler(RestRequest.Method.POST, "/_template/{name}", this); } @SuppressWarnings({"unchecked"}) @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest(request.param("name")); putRequest.template(request.param("template", putRequest.template())); putRequest.order(request.paramAsInt("order", putRequest.order())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java index 7c4c0f51b8e..ee2aa156adc 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; @@ -48,8 +48,8 @@ import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastSh public class 
RestUpgradeAction extends BaseRestHandler { @Inject - public RestUpgradeAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestUpgradeAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/_upgrade", this); controller.registerHandler(POST, "/{index}/_upgrade", this); @@ -58,7 +59,7 @@ public class RestUpgradeAction extends BaseRestHandler { } @Override - protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception { + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { if (request.method().equals(RestRequest.Method.GET)) { handleGet(request, channel, client); } else if (request.method().equals(RestRequest.Method.POST)) { @@ -66,7 +67,7 @@ public class RestUpgradeAction extends BaseRestHandler { } } - void handleGet(final RestRequest request, RestChannel channel, Client client) { + void handleGet(final RestRequest request, RestChannel channel, NodeClient client) { client.admin().indices().prepareUpgradeStatus(Strings.splitStringByCommaToArray(request.param("index"))) .execute(new RestBuilderListener(channel) { @Override @@ -79,7 +80,7 @@ public class RestUpgradeAction extends BaseRestHandler { }); } - void handlePost(final RestRequest request, RestChannel channel, Client client) { + void handlePost(final RestRequest request, RestChannel channel, NodeClient client) { UpgradeRequest upgradeReq = new UpgradeRequest(Strings.splitStringByCommaToArray(request.param("index"))); upgradeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false)); client.admin().indices().upgrade(upgradeReq, new RestBuilderListener(channel) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java 
b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java index 6ac71708e32..2000078e011 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.admin.indices.validate.query.QueryExplanation; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -55,8 +55,8 @@ public class RestValidateQueryAction extends BaseRestHandler { private final IndicesQueriesRegistry indicesQueriesRegistry; @Inject - public RestValidateQueryAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry indicesQueriesRegistry) { - super(settings, client); + public RestValidateQueryAction(Settings settings, RestController controller, IndicesQueriesRegistry indicesQueriesRegistry) { + super(settings); controller.registerHandler(GET, "/_validate/query", this); controller.registerHandler(POST, "/_validate/query", this); controller.registerHandler(GET, "/{index}/_validate/query", this); @@ -67,7 +67,7 @@ public class RestValidateQueryAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { ValidateQueryRequest validateQueryRequest = new 
ValidateQueryRequest(Strings.splitStringByCommaToArray(request.param("index"))); validateQueryRequest.indicesOptions(IndicesOptions.fromRequest(request, validateQueryRequest.indicesOptions())); validateQueryRequest.explain(request.paramAsBoolean("explain", false)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java b/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java index d9dbb21e804..623af6d2f47 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.BulkShardRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.Requests; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -56,8 +56,8 @@ public class RestBulkAction extends BaseRestHandler { private final boolean allowExplicitIndex; @Inject - public RestBulkAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestBulkAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/_bulk", this); controller.registerHandler(PUT, "/_bulk", this); @@ -70,7 +70,7 @@ public class RestBulkAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { BulkRequest bulkRequest = Requests.bulkRequest(); String defaultIndex = request.param("index"); String defaultType = request.param("type"); diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java index 12393f58007..a40463e3e27 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.rest.action.cat; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Table; import org.elasticsearch.common.io.UTF8StreamWriter; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -38,18 +38,18 @@ import static org.elasticsearch.rest.action.support.RestTable.pad; */ public abstract class AbstractCatAction extends BaseRestHandler { - public AbstractCatAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public AbstractCatAction(Settings settings, RestController controller) { + super(settings); } - protected abstract void doRequest(final RestRequest request, final RestChannel channel, final Client client); + protected abstract void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client); protected abstract void documentation(StringBuilder sb); protected abstract Table getTableWithHeader(final RestRequest request); @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { boolean helpWanted = request.paramAsBoolean("help", false); if (helpWanted) { Table table = getTableWithHeader(request); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java index b322fef9f30..9c68d0abbc7 100644 --- 
a/core/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.cat; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; @@ -44,15 +44,15 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestAliasAction extends AbstractCatAction { @Inject - public RestAliasAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestAliasAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/aliases", this); controller.registerHandler(GET, "/_cat/aliases/{alias}", this); } @Override - protected void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + protected void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final GetAliasesRequest getAliasesRequest = request.hasParam("alias") ? 
new GetAliasesRequest(request.param("alias")) : new GetAliasesRequest(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index 4a64ef409db..146b52a63a0 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -26,7 +26,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Strings; @@ -48,8 +48,8 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestAllocationAction extends AbstractCatAction { @Inject - public RestAllocationAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestAllocationAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/allocation", this); controller.registerHandler(GET, "/_cat/allocation/{nodes}", this); } @@ -60,7 +60,7 @@ public class RestAllocationAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final String[] nodes = Strings.splitStringByCommaToArray(request.param("nodes", "data:true")); final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); 
clusterStateRequest.clear().routingTable(true); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java index 23229540b96..b9cc5011a81 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest.action.cat; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -40,8 +40,8 @@ public class RestCatAction extends BaseRestHandler { private final String HELP; @Inject - public RestCatAction(Settings settings, RestController controller, Set catActions, Client client) { - super(settings, client); + public RestCatAction(Settings settings, RestController controller, Set catActions) { + super(settings); controller.registerHandler(GET, "/_cat", this); StringBuilder sb = new StringBuilder(); sb.append(CAT_NL); @@ -52,7 +52,7 @@ public class RestCatAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { channel.sendResponse(new BytesRestResponse(RestStatus.OK, HELP)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java index 46e8fadd05a..032fd5c3aba 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.action.search.SearchRequest; import 
org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.common.bytes.BytesArray; @@ -45,8 +45,8 @@ public class RestCountAction extends AbstractCatAction { private final IndicesQueriesRegistry indicesQueriesRegistry; @Inject - public RestCountAction(Settings settings, RestController restController, RestController controller, Client client, IndicesQueriesRegistry indicesQueriesRegistry) { - super(settings, controller, client); + public RestCountAction(Settings settings, RestController restController, RestController controller, IndicesQueriesRegistry indicesQueriesRegistry) { + super(settings, controller); restController.registerHandler(GET, "/_cat/count", this); restController.registerHandler(GET, "/_cat/count/{index}", this); this.indicesQueriesRegistry = indicesQueriesRegistry; @@ -59,7 +59,7 @@ public class RestCountAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); SearchRequest countRequest = new SearchRequest(indices); String source = request.param("source"); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java index 8febf04e892..d3458b40d77 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java @@ -23,7 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import 
org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -43,14 +43,14 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestFielddataAction extends AbstractCatAction { @Inject - public RestFielddataAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestFielddataAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/fielddata", this); controller.registerHandler(GET, "/_cat/fielddata/{fields}", this); } @Override - protected void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + protected void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true"); nodesStatsRequest.clear(); nodesStatsRequest.indices(true); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java index 7dc7954a3a4..07f36cc2f6d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Table; import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -39,8 +39,8 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestHealthAction extends AbstractCatAction { @Inject - public RestHealthAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestHealthAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/health", this); } @@ -50,7 +50,7 @@ public class RestHealthAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { ClusterHealthRequest clusterHealthRequest = new ClusterHealthRequest(); client.admin().cluster().health(clusterHealthRequest, new RestResponseListener(channel) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index 38cbba6fd5a..fcdfe009e18 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterIndexHealth; @@ -57,8 +57,8 @@ public class RestIndicesAction extends AbstractCatAction { private final IndexNameExpressionResolver 
indexNameExpressionResolver; @Inject - public RestIndicesAction(Settings settings, RestController controller, Client client, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, controller, client); + public RestIndicesAction(Settings settings, RestController controller, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, controller); this.indexNameExpressionResolver = indexNameExpressionResolver; controller.registerHandler(GET, "/_cat/indices", this); controller.registerHandler(GET, "/_cat/indices/{index}", this); @@ -71,7 +71,7 @@ public class RestIndicesAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().indices(indices).metaData(true); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java index e7a7d7afc96..164c2f79dac 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Table; @@ -39,8 +39,8 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class 
RestMasterAction extends AbstractCatAction { @Inject - public RestMasterAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestMasterAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/master", this); } @@ -50,7 +50,7 @@ public class RestMasterAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java index 2190e1e2993..815895a2679 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; @@ -47,8 +47,8 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestNodeAttrsAction extends AbstractCatAction { @Inject - public RestNodeAttrsAction(Settings settings, RestController controller, 
Client client) { - super(settings, controller, client); + public RestNodeAttrsAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/nodeattrs", this); } @@ -58,7 +58,7 @@ public class RestNodeAttrsAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index 1069a879d94..cb7bd59be3c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; @@ -72,8 +72,8 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestNodesAction extends AbstractCatAction { @Inject - public RestNodesAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestNodesAction(Settings settings, RestController controller) { + 
super(settings, controller); controller.registerHandler(GET, "/_cat/nodes", this); } @@ -83,7 +83,7 @@ public class RestNodesAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java index b563450f8f6..b85906e25b4 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; @@ -37,8 +37,8 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestPendingClusterTasksAction extends AbstractCatAction { @Inject - public RestPendingClusterTasksAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestPendingClusterTasksAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/pending_tasks", this); } @@ -48,7 +48,7 @@ public class 
RestPendingClusterTasksAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout())); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java index 189273a7722..13e1f2176d7 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Table; @@ -44,8 +44,8 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestPluginsAction extends AbstractCatAction { @Inject - public RestPluginsAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestPluginsAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/plugins", this); } @@ -55,7 +55,7 @@ public class 
RestPluginsAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java index 07c2611f2ce..6308ce7e689 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java @@ -23,7 +23,7 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; @@ -51,8 +51,8 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestRecoveryAction extends AbstractCatAction { @Inject - public RestRecoveryAction(Settings settings, RestController restController, RestController controller, Client client) { - super(settings, controller, client); + public RestRecoveryAction(Settings settings, RestController restController, RestController controller) { + super(settings, controller); restController.registerHandler(GET, "/_cat/recovery", this); restController.registerHandler(GET, "/_cat/recovery/{index}", this); } @@ -64,7 +64,7 @@ public class RestRecoveryAction extends AbstractCatAction { } @Override - 
public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final RecoveryRequest recoveryRequest = new RecoveryRequest(Strings.splitStringByCommaToArray(request.param("index"))); recoveryRequest.detailed(request.paramAsBoolean("detailed", false)); recoveryRequest.activeOnly(request.paramAsBoolean("active_only", false)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java index d19e2b19ca0..ee53e1241c7 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; @@ -40,13 +40,13 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; */ public class RestRepositoriesAction extends AbstractCatAction { @Inject - public RestRepositoriesAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestRepositoriesAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/repositories", this); } @Override - protected void doRequest(RestRequest request, RestChannel channel, Client client) { + protected void doRequest(RestRequest request, RestChannel channel, NodeClient client) { 
GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(); getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); getRepositoriesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRepositoriesRequest.masterNodeTimeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java index e5a1b4b49a3..2540e194d84 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java @@ -26,7 +26,7 @@ import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.segments.ShardSegments; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; @@ -49,14 +49,14 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestSegmentsAction extends AbstractCatAction { @Inject - public RestSegmentsAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestSegmentsAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/segments", this); controller.registerHandler(GET, "/_cat/segments/{index}", this); } @Override - protected void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + protected void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final String[] indices = 
Strings.splitStringByCommaToArray(request.param("index")); final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index 0f6bec367d5..214979aef66 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -25,7 +25,7 @@ import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; @@ -49,8 +49,8 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestShardsAction extends AbstractCatAction { @Inject - public RestShardsAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestShardsAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/shards", this); controller.registerHandler(GET, "/_cat/shards/{index}", this); } @@ -62,7 +62,7 @@ public class RestShardsAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final ClusterStateRequest clusterStateRequest = new 
ClusterStateRequest(); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java index 94d178e4db9..e503118fbab 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -47,13 +47,13 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; */ public class RestSnapshotAction extends AbstractCatAction { @Inject - public RestSnapshotAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestSnapshotAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/snapshots/{repository}", this); } @Override - protected void doRequest(final RestRequest request, RestChannel channel, Client client) { + protected void doRequest(final RestRequest request, RestChannel channel, NodeClient client) { GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest() .repository(request.param("repository")) .snapshots(new String[]{GetSnapshotsRequest.ALL_SNAPSHOTS}); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java index 9bb01a7b166..7486acfbb80 100644 --- 
a/core/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; @@ -51,8 +51,8 @@ public class RestTasksAction extends AbstractCatAction { private final ClusterService clusterService; @Inject - public RestTasksAction(Settings settings, RestController controller, Client client, ClusterService clusterService) { - super(settings, controller, client); + public RestTasksAction(Settings settings, RestController controller, ClusterService clusterService) { + super(settings, controller); controller.registerHandler(GET, "/_cat/tasks", this); this.clusterService = clusterService; } @@ -63,7 +63,7 @@ public class RestTasksAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { client.admin().cluster().listTasks(generateListTasksRequest(request), new RestResponseListener(channel) { @Override public RestResponse buildResponse(ListTasksResponse listTasksResponse) throws Exception { diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index 2ade4a1db39..caf28a9b018 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ 
b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; @@ -108,8 +108,8 @@ public class RestThreadPoolAction extends AbstractCatAction { } @Inject - public RestThreadPoolAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); + public RestThreadPoolAction(Settings settings, RestController controller) { + super(settings, controller); controller.registerHandler(GET, "/_cat/thread_pool", this); } @@ -119,7 +119,7 @@ public class RestThreadPoolAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java b/core/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java index 71e5832071c..d75bb7a0bac 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.count; import 
org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; @@ -53,8 +53,8 @@ public class RestCountAction extends BaseRestHandler { private final IndicesQueriesRegistry indicesQueriesRegistry; @Inject - public RestCountAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry indicesQueriesRegistry) { - super(settings, client); + public RestCountAction(Settings settings, RestController controller, IndicesQueriesRegistry indicesQueriesRegistry) { + super(settings); controller.registerHandler(POST, "/_count", this); controller.registerHandler(GET, "/_count", this); controller.registerHandler(POST, "/{index}/_count", this); @@ -65,7 +65,7 @@ public class RestCountAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { SearchRequest countRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index"))); countRequest.indicesOptions(IndicesOptions.fromRequest(request, countRequest.indicesOptions())); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0); diff --git a/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java b/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java index 29316893504..2f9b10096cc 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java @@ -21,7 +21,7 @@ package 
org.elasticsearch.rest.action.delete; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; @@ -40,13 +40,13 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE; public class RestDeleteAction extends BaseRestHandler { @Inject - public RestDeleteAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestDeleteAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(DELETE, "/{index}/{type}/{id}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), request.param("type"), request.param("id")); deleteRequest.routing(request.param("routing")); deleteRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing diff --git a/core/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java b/core/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java index 47f5e7da19b..19f55f4cd46 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.explain; import org.apache.lucene.search.Explanation; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainResponse; -import org.elasticsearch.client.Client; +import 
org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; @@ -56,15 +56,15 @@ public class RestExplainAction extends BaseRestHandler { private final IndicesQueriesRegistry indicesQueriesRegistry; @Inject - public RestExplainAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry indicesQueriesRegistry) { - super(settings, client); + public RestExplainAction(Settings settings, RestController controller, IndicesQueriesRegistry indicesQueriesRegistry) { + super(settings); this.indicesQueriesRegistry = indicesQueriesRegistry; controller.registerHandler(GET, "/{index}/{type}/{id}/_explain", this); controller.registerHandler(POST, "/{index}/{type}/{id}/_explain", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final ExplainRequest explainRequest = new ExplainRequest(request.param("index"), request.param("type"), request.param("id")); explainRequest.parent(request.param("parent")); explainRequest.routing(request.param("routing")); diff --git a/core/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java index 744a76e7352..9f62024ab80 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.action.fieldstats.FieldStatsRequest; import org.elasticsearch.action.fieldstats.FieldStatsResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import 
org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -49,8 +49,8 @@ import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastSh public class RestFieldStatsAction extends BaseRestHandler { @Inject - public RestFieldStatsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestFieldStatsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_field_stats", this); controller.registerHandler(POST, "/_field_stats", this); controller.registerHandler(GET, "/{index}/_field_stats", this); @@ -59,7 +59,7 @@ public class RestFieldStatsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, - final RestChannel channel, final Client client) throws Exception { + final RestChannel channel, final NodeClient client) throws Exception { if (RestActions.hasBodyContent(request) && request.hasParam("fields")) { throw new IllegalArgumentException("can't specify a request body and [fields] request parameter, " + "either specify a request body or the [fields] request parameter"); diff --git a/core/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java b/core/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java index 193fdd2e747..3cc7d8fd1ae 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.get; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.settings.Settings; @@ -47,13 +47,13 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestGetAction extends BaseRestHandler { @Inject - public RestGetAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestGetAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/{index}/{type}/{id}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); getRequest.operationThreaded(true); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java b/core/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java index 7d8a39b02b9..9e9b54b5b0c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.get; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -47,13 +47,13 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestGetSourceAction extends BaseRestHandler { @Inject - public RestGetSourceAction(Settings settings, RestController controller, Client client) { - super(settings, 
client); + public RestGetSourceAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/{index}/{type}/{id}/_source", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); getRequest.operationThreaded(true); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java b/core/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java index 01f3be435e7..ca8900ab77f 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.get; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.inject.Inject; @@ -49,8 +49,8 @@ public abstract class RestHeadAction extends BaseRestHandler { public static class Document extends RestHeadAction { @Inject - public Document(Settings settings, RestController controller, Client client) { - super(settings, client, false); + public Document(Settings settings, RestController controller) { + super(settings, false); controller.registerHandler(HEAD, "/{index}/{type}/{id}", this); } } @@ -61,8 +61,8 @@ public abstract class RestHeadAction extends BaseRestHandler { public static class Source extends RestHeadAction { @Inject - public Source(Settings settings, RestController controller, 
Client client) { - super(settings, client, true); + public Source(Settings settings, RestController controller) { + super(settings, true); controller.registerHandler(HEAD, "/{index}/{type}/{id}/_source", this); } } @@ -73,17 +73,16 @@ public abstract class RestHeadAction extends BaseRestHandler { * All subclasses must be registered in {@link org.elasticsearch.common.network.NetworkModule}. * * @param settings injected settings - * @param client injected client * @param source {@code false} to check for {@link GetResponse#isExists()}. * {@code true} to also check for {@link GetResponse#isSourceEmpty()}. */ - public RestHeadAction(Settings settings, Client client, boolean source) { - super(settings, client); + public RestHeadAction(Settings settings, boolean source) { + super(settings); this.source = source; } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); getRequest.operationThreaded(true); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java b/core/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java index 1722daa4445..de4f433dcbf 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.get; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.get.MultiGetResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.settings.Settings; @@ -41,8 +41,8 @@ public class RestMultiGetAction extends BaseRestHandler { private final boolean allowExplicitIndex; @Inject - public RestMultiGetAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestMultiGetAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_mget", this); controller.registerHandler(POST, "/_mget", this); controller.registerHandler(GET, "/{index}/_mget", this); @@ -54,7 +54,7 @@ public class RestMultiGetAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { MultiGetRequest multiGetRequest = new MultiGetRequest(); multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh())); multiGetRequest.preference(request.param("preference")); diff --git a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java index f807e68088a..884adcf0d81 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.index; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -46,30 +46,30 @@ import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; public class RestIndexAction extends BaseRestHandler { 
@Inject - public RestIndexAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestIndexAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/{index}/{type}", this); // auto id creation controller.registerHandler(PUT, "/{index}/{type}/{id}", this); controller.registerHandler(POST, "/{index}/{type}/{id}", this); - CreateHandler createHandler = new CreateHandler(settings, controller, client); + CreateHandler createHandler = new CreateHandler(settings, controller); controller.registerHandler(PUT, "/{index}/{type}/{id}/_create", createHandler); controller.registerHandler(POST, "/{index}/{type}/{id}/_create", createHandler); } final class CreateHandler extends BaseRestHandler { - protected CreateHandler(Settings settings, RestController controller, Client client) { - super(settings, client); + protected CreateHandler(Settings settings, RestController controller) { + super(settings); } @Override - public void handleRequest(RestRequest request, RestChannel channel, final Client client) { + public void handleRequest(RestRequest request, RestChannel channel, final NodeClient client) { request.params().put("op_type", "create"); RestIndexAction.this.handleRequest(request, channel, client); } } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { IndexRequest indexRequest = new IndexRequest(request.param("index"), request.param("type"), request.param("id")); indexRequest.routing(request.param("routing")); indexRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing diff --git a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java 
b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java index 6559e32817f..7c6200ee97c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.ingest; import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -32,13 +32,13 @@ import org.elasticsearch.rest.action.support.AcknowledgedRestListener; public class RestDeletePipelineAction extends BaseRestHandler { @Inject - public RestDeletePipelineAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestDeletePipelineAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.DELETE, "/_ingest/pipeline/{id}", this); } @Override - protected void handleRequest(RestRequest restRequest, RestChannel channel, Client client) throws Exception { + public void handleRequest(RestRequest restRequest, RestChannel channel, NodeClient client) throws Exception { DeletePipelineRequest request = new DeletePipelineRequest(restRequest.param("id")); request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); request.timeout(restRequest.paramAsTime("timeout", request.timeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java index c11290329b3..55f14cfaa84 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java +++ 
b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.ingest; import org.elasticsearch.action.ingest.GetPipelineRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -33,13 +33,13 @@ import org.elasticsearch.rest.action.support.RestStatusToXContentListener; public class RestGetPipelineAction extends BaseRestHandler { @Inject - public RestGetPipelineAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestGetPipelineAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.GET, "/_ingest/pipeline/{id}", this); } @Override - protected void handleRequest(RestRequest restRequest, RestChannel channel, Client client) throws Exception { + public void handleRequest(RestRequest restRequest, RestChannel channel, NodeClient client) throws Exception { GetPipelineRequest request = new GetPipelineRequest(Strings.splitStringByCommaToArray(restRequest.param("id"))); request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); client.admin().cluster().getPipeline(request, new RestStatusToXContentListener<>(channel)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java index a96ed3d6424..7d7080e8775 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.ingest; import org.elasticsearch.action.ingest.PutPipelineRequest; -import 
org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -34,13 +34,13 @@ import org.elasticsearch.rest.action.support.RestActions; public class RestPutPipelineAction extends BaseRestHandler { @Inject - public RestPutPipelineAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestPutPipelineAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.PUT, "/_ingest/pipeline/{id}", this); } @Override - protected void handleRequest(RestRequest restRequest, RestChannel channel, Client client) throws Exception { + public void handleRequest(RestRequest restRequest, RestChannel channel, NodeClient client) throws Exception { PutPipelineRequest request = new PutPipelineRequest(restRequest.param("id"), RestActions.getRestContent(restRequest)); request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); request.timeout(restRequest.paramAsTime("timeout", request.timeout())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java index fc2e834ea75..35ba1367e63 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest.action.ingest; import org.elasticsearch.action.ingest.SimulatePipelineRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -34,8 +34,8 
@@ import org.elasticsearch.rest.action.support.RestToXContentListener; public class RestSimulatePipelineAction extends BaseRestHandler { @Inject - public RestSimulatePipelineAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestSimulatePipelineAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.POST, "/_ingest/pipeline/{id}/_simulate", this); controller.registerHandler(RestRequest.Method.GET, "/_ingest/pipeline/{id}/_simulate", this); controller.registerHandler(RestRequest.Method.POST, "/_ingest/pipeline/_simulate", this); @@ -43,7 +43,7 @@ public class RestSimulatePipelineAction extends BaseRestHandler { } @Override - protected void handleRequest(RestRequest restRequest, RestChannel channel, Client client) throws Exception { + public void handleRequest(RestRequest restRequest, RestChannel channel, NodeClient client) throws Exception { SimulatePipelineRequest request = new SimulatePipelineRequest(RestActions.getRestContent(restRequest)); request.setId(restRequest.param("id")); request.setVerbose(restRequest.paramAsBoolean("verbose", false)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java b/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java index e57bb790e5e..24f4e66e7db 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.main; import org.elasticsearch.action.main.MainAction; import org.elasticsearch.action.main.MainRequest; import org.elasticsearch.action.main.MainResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.xcontent.XContentBuilder; @@ -46,14 +46,14 @@ import static org.elasticsearch.rest.RestRequest.Method.HEAD; public class RestMainAction extends BaseRestHandler { @Inject - public RestMainAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestMainAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/", this); controller.registerHandler(HEAD, "/", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { client.execute(MainAction.INSTANCE, new MainRequest(), new RestBuilderListener(channel) { @Override public RestResponse buildResponse(MainResponse mainResponse, XContentBuilder builder) throws Exception { diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java index 0dce23bf3b1..a189a58a3ae 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.search; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; @@ -46,15 +46,15 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE; public class RestClearScrollAction extends BaseRestHandler { @Inject - public RestClearScrollAction(Settings settings, RestController controller, 
Client client) { - super(settings, client); + public RestClearScrollAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(DELETE, "/_search/scroll", this); controller.registerHandler(DELETE, "/_search/scroll/{scroll_id}", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String scrollIds = request.param("scroll_id"); ClearScrollRequest clearRequest = new ClearScrollRequest(); clearRequest.setScrollIds(Arrays.asList(splitScrollIds(scrollIds))); diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 2935c88dab2..52965193a2e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -64,9 +64,9 @@ public class RestMultiSearchAction extends BaseRestHandler { private final Suggesters suggesters; @Inject - public RestMultiSearchAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry indicesQueriesRegistry, + public RestMultiSearchAction(Settings settings, RestController controller, IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, Suggesters suggesters) { - 
super(settings, client); + super(settings); this.aggParsers = aggParsers; this.suggesters = suggesters; @@ -82,7 +82,7 @@ public class RestMultiSearchAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { MultiSearchRequest multiSearchRequest = parseRequest(request, allowExplicitIndex, indicesQueriesRegistry, parseFieldMatcher, aggParsers, suggesters); client.multiSearch(multiSearchRequest, new RestToXContentListener<>(channel)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 5f5fe84d573..870d4a9eb20 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.search; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -67,9 +67,9 @@ public class RestSearchAction extends BaseRestHandler { private final Suggesters suggesters; @Inject - public RestSearchAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry queryRegistry, + public RestSearchAction(Settings settings, RestController controller, IndicesQueriesRegistry queryRegistry, AggregatorParsers aggParsers, Suggesters suggesters) { - super(settings, client); + super(settings); this.queryRegistry = 
queryRegistry; this.aggParsers = aggParsers; this.suggesters = suggesters; @@ -82,7 +82,7 @@ public class RestSearchAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws IOException { SearchRequest searchRequest = new SearchRequest(); BytesReference restContent = RestActions.hasBodyContent(request) ? RestActions.getRestContent(request) : null; parseSearchRequest(searchRequest, queryRegistry, request, parseFieldMatcher, aggParsers, suggesters, restContent); diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java index 3a10db38ee1..6b9b09545ad 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.search; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -50,8 +50,8 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestSearchScrollAction extends BaseRestHandler { @Inject - public RestSearchScrollAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestSearchScrollAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_search/scroll", this); controller.registerHandler(POST, "/_search/scroll", 
this); @@ -60,7 +60,7 @@ public class RestSearchScrollAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { String scrollId = request.param("scroll_id"); SearchScrollRequest searchScrollRequest = new SearchScrollRequest(); searchScrollRequest.scrollId(scrollId); diff --git a/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java b/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java index c5c6174b02b..f6acfc6daf5 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.suggest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; @@ -61,9 +61,9 @@ public class RestSuggestAction extends BaseRestHandler { private final Suggesters suggesters; @Inject - public RestSuggestAction(Settings settings, RestController controller, Client client, + public RestSuggestAction(Settings settings, RestController controller, IndicesQueriesRegistry queryRegistry, Suggesters suggesters) { - super(settings, client); + super(settings); this.queryRegistry = queryRegistry; this.suggesters = suggesters; controller.registerHandler(POST, "/_suggest", this); @@ -73,7 +73,7 @@ public class RestSuggestAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final 
Client client) throws IOException { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws IOException { final SearchRequest searchRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index")), new SearchSourceBuilder()); searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); if (RestActions.hasBodyContent(request)) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java b/core/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java index dfcbeef171c..888d854f40c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.termvectors; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -39,8 +39,8 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestMultiTermVectorsAction extends BaseRestHandler { @Inject - public RestMultiTermVectorsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestMultiTermVectorsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_mtermvectors", this); controller.registerHandler(POST, "/_mtermvectors", this); controller.registerHandler(GET, "/{index}/_mtermvectors", this); @@ -50,7 +50,7 @@ public class 
RestMultiTermVectorsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { MultiTermVectorsRequest multiTermVectorsRequest = new MultiTermVectorsRequest(); TermVectorsRequest template = new TermVectorsRequest(); template.index(request.param("index")); diff --git a/core/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java b/core/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java index d22afa615d8..6aa2d1bdb14 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.termvectors; import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -48,8 +48,8 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestTermVectorsAction extends BaseRestHandler { @Inject - public RestTermVectorsAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestTermVectorsAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/{index}/{type}/_termvectors", this); controller.registerHandler(POST, "/{index}/{type}/_termvectors", this); controller.registerHandler(GET, "/{index}/{type}/{id}/_termvectors", this); @@ -63,7 +63,7 @@ public class RestTermVectorsAction extends BaseRestHandler 
{ } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { TermVectorsRequest termVectorsRequest = new TermVectorsRequest(request.param("index"), request.param("type"), request.param("id")); if (RestActions.hasBodyContent(request)) { try (XContentParser parser = XContentFactory.xContent(RestActions.guessBodyContentType(request)).createParser(RestActions.getRestContent(request))){ diff --git a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java index bdea4e33e6d..7d739983bf4 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.update; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -47,13 +47,13 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestUpdateAction extends BaseRestHandler { @Inject - public RestUpdateAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestUpdateAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(POST, "/{index}/{type}/{id}/_update", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { + public void 
handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { UpdateRequest updateRequest = new UpdateRequest(request.param("index"), request.param("type"), request.param("id")); updateRequest.routing(request.param("routing")); updateRequest.parent(request.param("parent")); diff --git a/core/src/test/java/org/elasticsearch/action/ActionModuleTests.java b/core/src/test/java/org/elasticsearch/action/ActionModuleTests.java index 4b9a833e8c4..87c1a227328 100644 --- a/core/src/test/java/org/elasticsearch/action/ActionModuleTests.java +++ b/core/src/test/java/org/elasticsearch/action/ActionModuleTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.main.MainAction; import org.elasticsearch.action.main.TransportMainAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.ActionPlugin; @@ -115,7 +116,7 @@ public class ActionModuleTests extends ESTestCase { public void testPluginCanRegisterRestHandler() { class FakeHandler implements RestHandler { @Override - public void handleRequest(RestRequest request, RestChannel channel) throws Exception { + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { } } ActionPlugin registersFakeHandler = new ActionPlugin() { diff --git a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java index 749ffa3c9d9..9ae0beadf59 100644 --- a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java +++ b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.common.network; import 
org.elasticsearch.action.support.replication.ReplicationTask; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Table; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.ModuleTestCase; @@ -87,18 +87,18 @@ public class NetworkModuleTests extends ModuleTestCase { static class FakeRestHandler extends BaseRestHandler { public FakeRestHandler() { - super(null, null); + super(null); } @Override - protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception {} + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception {} } static class FakeCatRestHandler extends AbstractCatAction { public FakeCatRestHandler() { - super(null, null, null); + super(null, null); } @Override - protected void doRequest(RestRequest request, RestChannel channel, Client client) {} + protected void doRequest(RestRequest request, RestChannel channel, NodeClient client) {} @Override protected void documentation(StringBuilder sb) {} @Override diff --git a/core/src/test/java/org/elasticsearch/http/HttpServerTests.java b/core/src/test/java/org/elasticsearch/http/HttpServerTests.java index 2ba7da84c14..ce0c78c1c92 100644 --- a/core/src/test/java/org/elasticsearch/http/HttpServerTests.java +++ b/core/src/test/java/org/elasticsearch/http/HttpServerTests.java @@ -66,9 +66,9 @@ public class HttpServerTests extends ESTestCase { HttpServerTransport httpServerTransport = new TestHttpServerTransport(); RestController restController = new RestController(settings); restController.registerHandler(RestRequest.Method.GET, "/", - (request, channel) -> channel.sendResponse( + (request, channel, client) -> channel.sendResponse( new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY))); - restController.registerHandler(RestRequest.Method.GET, "/error", (request, 
channel) -> { + restController.registerHandler(RestRequest.Method.GET, "/error", (request, channel, client) -> { throw new IllegalArgumentException("test error"); }); @@ -76,7 +76,7 @@ public class HttpServerTests extends ESTestCase { new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null); NodeService nodeService = new NodeService(Settings.EMPTY, null, null, null, null, null, null, null, null, clusterService, null); - httpServer = new HttpServer(settings, httpServerTransport, restController, nodeService, circuitBreakerService); + httpServer = new HttpServer(settings, httpServerTransport, restController, nodeService, null, circuitBreakerService); httpServer.start(); } diff --git a/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderRestAction.java b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderRestAction.java index 39432bd01ea..499b6fadc93 100644 --- a/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderRestAction.java +++ b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderRestAction.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.plugins.responseheader; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -32,13 +32,13 @@ import org.elasticsearch.rest.RestStatus; public class TestResponseHeaderRestAction extends BaseRestHandler { @Inject - public TestResponseHeaderRestAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public TestResponseHeaderRestAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(RestRequest.Method.GET, "/_protected", this); } @Override - public void handleRequest(RestRequest request, RestChannel channel, Client client) { + 
public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) { if ("password".equals(request.header("Secret"))) { RestResponse response = new BytesRestResponse(RestStatus.OK, "Access granted"); response.addHeader("Secret", "granted"); diff --git a/core/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/core/src/test/java/org/elasticsearch/rest/RestControllerTests.java index 9cade7aa513..834afe5d5cd 100644 --- a/core/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/core/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; @@ -78,7 +79,7 @@ public class RestControllerTests extends ESTestCase { } @Override - void executeHandler(RestRequest request, RestChannel channel) throws Exception { + void executeHandler(RestRequest request, RestChannel channel, NodeClient client) throws Exception { assertEquals("true", threadContext.getHeader("header.1")); assertEquals("true", threadContext.getHeader("header.2")); assertNull(threadContext.getHeader("header.3")); @@ -91,7 +92,7 @@ public class RestControllerTests extends ESTestCase { restHeaders.put("header.1", "true"); restHeaders.put("header.2", "true"); restHeaders.put("header.3", "false"); - restController.dispatchRequest(new FakeRestRequest.Builder().withHeaders(restHeaders).build(), null, threadContext); + restController.dispatchRequest(new FakeRestRequest.Builder().withHeaders(restHeaders).build(), null, null, threadContext); assertNull(threadContext.getHeader("header.1")); assertNull(threadContext.getHeader("header.2")); assertEquals("true", threadContext.getHeader("header.3")); @@ -117,7 +118,7 @@ public class RestControllerTests extends ESTestCase { } @Override - public void handleRequest(RestRequest 
request, RestChannel channel) throws Exception { + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { //no op } diff --git a/core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java b/core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java index 51f36d1e25f..dd6d1dac47b 100644 --- a/core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java +++ b/core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -71,16 +72,13 @@ public class RestFilterChainTests extends ESTestCase { } } - restController.registerHandler(RestRequest.Method.GET, "/", new RestHandler() { - @Override - public void handleRequest(RestRequest request, RestChannel channel) throws Exception { - channel.sendResponse(new TestResponse()); - } + restController.registerHandler(RestRequest.Method.GET, "/", (request, channel, client) -> { + channel.sendResponse(new TestResponse()); }); FakeRestRequest fakeRestRequest = new FakeRestRequest(); FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), 1); - restController.dispatchRequest(fakeRestRequest, fakeRestChannel, new ThreadContext(Settings.EMPTY)); + restController.dispatchRequest(fakeRestRequest, fakeRestChannel, null, new ThreadContext(Settings.EMPTY)); assertThat(fakeRestChannel.await(), equalTo(true)); @@ -117,12 +115,9 @@ public class RestFilterChainTests extends ESTestCase { final int additionalContinueCount = randomInt(10); - TestFilter testFilter = new TestFilter(randomInt(), new Callback() { - @Override - public void execute(final RestRequest request, final RestChannel channel, final RestFilterChain filterChain) throws Exception { - for (int i = 0; i <= 
additionalContinueCount; i++) { - filterChain.continueProcessing(request, channel); - } + TestFilter testFilter = new TestFilter(randomInt(), (request, channel, client, filterChain) -> { + for (int i = 0; i <= additionalContinueCount; i++) { + filterChain.continueProcessing(request, channel, null); } }); @@ -131,14 +126,14 @@ public class RestFilterChainTests extends ESTestCase { restController.registerHandler(RestRequest.Method.GET, "/", new RestHandler() { @Override - public void handleRequest(RestRequest request, RestChannel channel) throws Exception { + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { channel.sendResponse(new TestResponse()); } }); FakeRestRequest fakeRestRequest = new FakeRestRequest(); FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), additionalContinueCount + 1); - restController.dispatchRequest(fakeRestRequest, fakeRestChannel, new ThreadContext(Settings.EMPTY)); + restController.dispatchRequest(fakeRestRequest, fakeRestChannel, null, new ThreadContext(Settings.EMPTY)); fakeRestChannel.await(); assertThat(testFilter.runs.get(), equalTo(1)); @@ -150,20 +145,20 @@ public class RestFilterChainTests extends ESTestCase { private static enum Operation implements Callback { CONTINUE_PROCESSING { @Override - public void execute(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws Exception { - filterChain.continueProcessing(request, channel); + public void execute(RestRequest request, RestChannel channel, NodeClient client, RestFilterChain filterChain) throws Exception { + filterChain.continueProcessing(request, channel, client); } }, CHANNEL_RESPONSE { @Override - public void execute(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws Exception { + public void execute(RestRequest request, RestChannel channel, NodeClient client, RestFilterChain filterChain) throws Exception { channel.sendResponse(new 
TestResponse()); } } } private static interface Callback { - void execute(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws Exception; + void execute(RestRequest request, RestChannel channel, NodeClient client, RestFilterChain filterChain) throws Exception; } private final AtomicInteger counter = new AtomicInteger(); @@ -180,10 +175,10 @@ public class RestFilterChainTests extends ESTestCase { } @Override - public void process(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws Exception { + public void process(RestRequest request, RestChannel channel, NodeClient client, RestFilterChain filterChain) throws Exception { this.runs.incrementAndGet(); this.executionToken = counter.incrementAndGet(); - this.callback.execute(request, channel, filterChain); + this.callback.execute(request, channel, client, filterChain); } @Override diff --git a/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java index b603ded8697..34275d78d7b 100644 --- a/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java +++ b/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java @@ -51,7 +51,7 @@ public class RestRecoveryActionTests extends ESTestCase { public void testRestRecoveryAction() { final Settings settings = Settings.EMPTY; final RestController restController = new RestController(settings); - final RestRecoveryAction action = new RestRecoveryAction(settings, restController, restController, null); + final RestRecoveryAction action = new RestRecoveryAction(settings, restController, restController); final int totalShards = randomIntBetween(1, 32); final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2)); final int failedShards = totalShards - successfulShards; diff --git 
a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 3ba6c875b68..19c508e2bb1 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; @@ -635,6 +636,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest .put(MockRepository.Plugin.PASSWORD_SETTING.getKey(), "verysecretpassword") ).get(); + NodeClient nodeClient = internalCluster().getInstance(NodeClient.class); RestGetRepositoriesAction getRepoAction = internalCluster().getInstance(RestGetRepositoriesAction.class); RestRequest getRepoRequest = new FakeRestRequest(); getRepoRequest.params().put("repository", "test-repo"); @@ -651,7 +653,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest } getRepoLatch.countDown(); } - }); + }, nodeClient); assertTrue(getRepoLatch.await(1, TimeUnit.SECONDS)); if (getRepoError.get() != null) { throw getRepoError.get(); @@ -672,7 +674,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest } clusterStateLatch.countDown(); } - }); + }, nodeClient); assertTrue(clusterStateLatch.await(1, TimeUnit.SECONDS)); if (clusterStateError.get() != null) { throw clusterStateError.get(); diff --git 
a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestDeleteSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestDeleteSearchTemplateAction.java index 46bad569933..22adfe13ac4 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestDeleteSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestDeleteSearchTemplateAction.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.rest.action.search.template; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.RestController; @@ -31,8 +31,8 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE; public class RestDeleteSearchTemplateAction extends RestDeleteStoredScriptAction { @Inject - public RestDeleteSearchTemplateAction(Settings settings, RestController controller, Client client) { - super(settings, controller, false, client); + public RestDeleteSearchTemplateAction(Settings settings, RestController controller) { + super(settings, controller, false); controller.registerHandler(DELETE, "/_search/template/{id}", this); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestGetSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestGetSearchTemplateAction.java index 61fcaf1ccec..f92cdc02a67 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestGetSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestGetSearchTemplateAction.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.rest.action.search.template; -import org.elasticsearch.client.Client; 
+import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.RestController; @@ -33,8 +33,8 @@ public class RestGetSearchTemplateAction extends RestGetStoredScriptAction { private static final String TEMPLATE = "template"; @Inject - public RestGetSearchTemplateAction(Settings settings, RestController controller, Client client) { - super(settings, controller, false, client); + public RestGetSearchTemplateAction(Settings settings, RestController controller) { + super(settings, controller, false); controller.registerHandler(GET, "/_search/template/{id}", this); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestMultiSearchTemplateAction.java index 48f69dda50f..add4719ad93 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestMultiSearchTemplateAction.java @@ -24,7 +24,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.search.template.MultiSearchTemplateAction; import org.elasticsearch.action.search.template.MultiSearchTemplateRequest; import org.elasticsearch.action.search.template.SearchTemplateRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -45,8 +45,8 @@ public class RestMultiSearchTemplateAction extends BaseRestHandler { private final boolean allowExplicitIndex; @Inject - public RestMultiSearchTemplateAction(Settings settings, RestController controller, Client client) { - 
super(settings, client); + public RestMultiSearchTemplateAction(Settings settings, RestController controller) { + super(settings); this.allowExplicitIndex = MULTI_ALLOW_EXPLICIT_INDEX.get(settings); controller.registerHandler(GET, "/_msearch/template", this); @@ -58,7 +58,7 @@ public class RestMultiSearchTemplateAction extends BaseRestHandler { } @Override - protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception { + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { if (RestActions.hasBodyContent(request) == false) { throw new ElasticsearchException("request body is required"); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestPutSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestPutSearchTemplateAction.java index 90854bee01c..48e98bc9237 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestPutSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestPutSearchTemplateAction.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.rest.action.search.template; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.RestController; @@ -32,8 +32,8 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; public class RestPutSearchTemplateAction extends RestPutStoredScriptAction { @Inject - public RestPutSearchTemplateAction(Settings settings, RestController controller, Client client) { - super(settings, controller, false, client); + public RestPutSearchTemplateAction(Settings settings, RestController controller) { + super(settings, controller, false); controller.registerHandler(POST, 
"/_search/template/{id}", this); controller.registerHandler(PUT, "/_search/template/{id}", this); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestRenderSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestRenderSearchTemplateAction.java index 388837962c5..5e7e75e9c81 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestRenderSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestRenderSearchTemplateAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.search.template; import org.elasticsearch.action.search.template.SearchTemplateAction; import org.elasticsearch.action.search.template.SearchTemplateRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; @@ -38,8 +38,8 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestRenderSearchTemplateAction extends BaseRestHandler { @Inject - public RestRenderSearchTemplateAction(Settings settings, RestController controller, Client client) { - super(settings, client); + public RestRenderSearchTemplateAction(Settings settings, RestController controller) { + super(settings); controller.registerHandler(GET, "/_render/template", this); controller.registerHandler(POST, "/_render/template", this); controller.registerHandler(GET, "/_render/template/{id}", this); @@ -47,7 +47,7 @@ public class RestRenderSearchTemplateAction extends BaseRestHandler { } @Override - protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception { + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { // Creates the 
render template request SearchTemplateRequest renderRequest = RestSearchTemplateAction.parse(RestActions.getRestContent(request)); renderRequest.setSimulate(true); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestSearchTemplateAction.java index b15efa74420..131443da887 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/rest/action/search/template/RestSearchTemplateAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.template.SearchTemplateAction; import org.elasticsearch.action.search.template.SearchTemplateRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParseFieldMatcherSupplier; @@ -88,9 +88,9 @@ public class RestSearchTemplateAction extends BaseRestHandler { private final Suggesters suggesters; @Inject - public RestSearchTemplateAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry queryRegistry, + public RestSearchTemplateAction(Settings settings, RestController controller, IndicesQueriesRegistry queryRegistry, AggregatorParsers aggregatorParsers, Suggesters suggesters) { - super(settings, client); + super(settings); this.queryRegistry = queryRegistry; this.aggParsers = aggregatorParsers; this.suggesters = suggesters; @@ -104,7 +104,7 @@ public class RestSearchTemplateAction extends BaseRestHandler { } @Override - protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception { + public void 
handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { if (RestActions.hasBodyContent(request) == false) { throw new ElasticsearchException("request body is required"); } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestMultiPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestMultiPercolateAction.java index a2902a9a7c2..6461ab3028d 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestMultiPercolateAction.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestMultiPercolateAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.percolator; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -39,9 +39,9 @@ public class RestMultiPercolateAction extends BaseRestHandler { private final TransportMultiPercolateAction action; @Inject - public RestMultiPercolateAction(Settings settings, RestController controller, Client client, + public RestMultiPercolateAction(Settings settings, RestController controller, TransportMultiPercolateAction action) { - super(settings, client); + super(settings); this.action = action; controller.registerHandler(POST, "/_mpercolate", this); controller.registerHandler(POST, "/{index}/_mpercolate", this); @@ -55,7 +55,7 @@ public class RestMultiPercolateAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest restRequest, final RestChannel restChannel, final Client client) throws Exception { + public void handleRequest(final RestRequest restRequest, final RestChannel restChannel, final NodeClient client) throws Exception { MultiPercolateRequest multiPercolateRequest = new MultiPercolateRequest(); 
multiPercolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, multiPercolateRequest.indicesOptions())); multiPercolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("index"))); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java index b752cc55f6c..a09107eedda 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.percolator; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -40,26 +40,26 @@ public class RestPercolateAction extends BaseRestHandler { private final TransportPercolateAction action; @Inject - public RestPercolateAction(Settings settings, RestController controller, Client client, TransportPercolateAction action) { - super(settings, client); + public RestPercolateAction(Settings settings, RestController controller, TransportPercolateAction action) { + super(settings); this.action = action; controller.registerHandler(GET, "/{index}/{type}/_percolate", this); controller.registerHandler(POST, "/{index}/{type}/_percolate", this); - RestPercolateExistingDocHandler existingDocHandler = new RestPercolateExistingDocHandler(settings, controller, client); + RestPercolateExistingDocHandler existingDocHandler = new RestPercolateExistingDocHandler(settings, controller); controller.registerHandler(GET, "/{index}/{type}/{id}/_percolate", existingDocHandler); controller.registerHandler(POST, "/{index}/{type}/{id}/_percolate", 
existingDocHandler); - RestCountPercolateDocHandler countHandler = new RestCountPercolateDocHandler(settings, controller, client); + RestCountPercolateDocHandler countHandler = new RestCountPercolateDocHandler(settings, controller); controller.registerHandler(GET, "/{index}/{type}/_percolate/count", countHandler); controller.registerHandler(POST, "/{index}/{type}/_percolate/count", countHandler); - RestCountPercolateExistingDocHandler countExistingDocHandler = new RestCountPercolateExistingDocHandler(settings, controller, client); + RestCountPercolateExistingDocHandler countExistingDocHandler = new RestCountPercolateExistingDocHandler(settings, controller); controller.registerHandler(GET, "/{index}/{type}/{id}/_percolate/count", countExistingDocHandler); controller.registerHandler(POST, "/{index}/{type}/{id}/_percolate/count", countExistingDocHandler); } - void parseDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, RestChannel restChannel, final Client client) { + void parseDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, RestChannel restChannel) { percolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("index"))); percolateRequest.documentType(restRequest.param("type")); percolateRequest.routing(restRequest.param("routing")); @@ -70,7 +70,7 @@ public class RestPercolateAction extends BaseRestHandler { executePercolate(percolateRequest, restChannel); } - void parseExistingDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, RestChannel restChannel, final Client client) { + void parseExistingDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, RestChannel restChannel) { String index = restRequest.param("index"); String type = restRequest.param("type"); percolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("percolate_index", index))); @@ -99,49 +99,49 @@ public class RestPercolateAction extends BaseRestHandler { } @Override - public void 
handleRequest(RestRequest restRequest, RestChannel restChannel, final Client client) { + public void handleRequest(RestRequest restRequest, RestChannel restChannel, final NodeClient client) { PercolateRequest percolateRequest = new PercolateRequest(); - parseDocPercolate(percolateRequest, restRequest, restChannel, client); + parseDocPercolate(percolateRequest, restRequest, restChannel); } final class RestCountPercolateDocHandler extends BaseRestHandler { - private RestCountPercolateDocHandler(Settings settings, final RestController controller, Client client) { - super(settings, client); + private RestCountPercolateDocHandler(Settings settings, final RestController controller) { + super(settings); } @Override - public void handleRequest(RestRequest restRequest, RestChannel restChannel, final Client client) { + public void handleRequest(RestRequest restRequest, RestChannel restChannel, final NodeClient client) { PercolateRequest percolateRequest = new PercolateRequest(); percolateRequest.onlyCount(true); - parseDocPercolate(percolateRequest, restRequest, restChannel, client); + parseDocPercolate(percolateRequest, restRequest, restChannel); } } final class RestPercolateExistingDocHandler extends BaseRestHandler { - protected RestPercolateExistingDocHandler(Settings settings, final RestController controller, Client client) { - super(settings, client); + protected RestPercolateExistingDocHandler(Settings settings, final RestController controller) { + super(settings); } @Override - public void handleRequest(RestRequest restRequest, RestChannel restChannel, final Client client) { + public void handleRequest(RestRequest restRequest, RestChannel restChannel, final NodeClient client) { PercolateRequest percolateRequest = new PercolateRequest(); - parseExistingDocPercolate(percolateRequest, restRequest, restChannel, client); + parseExistingDocPercolate(percolateRequest, restRequest, restChannel); } } final class RestCountPercolateExistingDocHandler extends BaseRestHandler { - 
protected RestCountPercolateExistingDocHandler(Settings settings, final RestController controller, Client client) { - super(settings, client); + protected RestCountPercolateExistingDocHandler(Settings settings, final RestController controller) { + super(settings); } @Override - public void handleRequest(RestRequest restRequest, RestChannel restChannel, final Client client) { + public void handleRequest(RestRequest restRequest, RestChannel restChannel, final NodeClient client) { PercolateRequest percolateRequest = new PercolateRequest(); percolateRequest.onlyCount(true); - parseExistingDocPercolate(percolateRequest, restRequest, restChannel, client); + parseExistingDocPercolate(percolateRequest, restRequest, restChannel); } } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java index 284e51e054f..048e4208fea 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java @@ -52,10 +52,10 @@ public abstract class AbstractBaseReindexRestHandler< private final ClusterService clusterService; private final TA action; - protected AbstractBaseReindexRestHandler(Settings settings, Client client, - IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, Suggesters suggesters, - ClusterService clusterService, TA action) { - super(settings, client); + protected AbstractBaseReindexRestHandler(Settings settings, IndicesQueriesRegistry indicesQueriesRegistry, + AggregatorParsers aggParsers, Suggesters suggesters, + ClusterService clusterService, TA action) { + super(settings); this.indicesQueriesRegistry = indicesQueriesRegistry; this.aggParsers = aggParsers; this.suggesters = suggesters; @@ -63,7 +63,7 @@ public abstract class 
AbstractBaseReindexRestHandler< this.action = action; } - protected void handleRequest(RestRequest request, RestChannel channel, + public void handleRequest(RestRequest request, RestChannel channel, boolean includeCreated, boolean includeUpdated) throws IOException { // Build the internal request diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java index 926da3befdd..9e4d8fc6d4e 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java @@ -50,10 +50,10 @@ public abstract class AbstractBulkByQueryRestHandler< Request extends AbstractBulkByScrollRequest, TA extends TransportAction> extends AbstractBaseReindexRestHandler { - protected AbstractBulkByQueryRestHandler(Settings settings, Client client, IndicesQueriesRegistry indicesQueriesRegistry, + protected AbstractBulkByQueryRestHandler(Settings settings, IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, Suggesters suggesters, ClusterService clusterService, TA action) { - super(settings, client, indicesQueriesRegistry, aggParsers, suggesters, clusterService, action); + super(settings, indicesQueriesRegistry, aggParsers, suggesters, clusterService, action); } protected void parseInternalRequest(Request internal, RestRequest restRequest, diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java index bb894584c8b..7a1f466c3c0 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java @@ -21,7 +21,7 @@ package 
org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -42,16 +42,16 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestDeleteByQueryAction extends AbstractBulkByQueryRestHandler { @Inject - public RestDeleteByQueryAction(Settings settings, RestController controller, Client client, + public RestDeleteByQueryAction(Settings settings, RestController controller, IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, Suggesters suggesters, ClusterService clusterService, TransportDeleteByQueryAction action) { - super(settings, client, indicesQueriesRegistry, aggParsers, suggesters, clusterService, action); + super(settings, indicesQueriesRegistry, aggParsers, suggesters, clusterService, action); controller.registerHandler(POST, "/{index}/_delete_by_query", this); controller.registerHandler(POST, "/{index}/{type}/_delete_by_query", this); } @Override - protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception { + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { if (false == request.hasContent()) { throw new ElasticsearchException("_delete_by_query requires a request body"); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index 22fcd390430..af0bd168ff0 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -22,7 +22,7 @@ 
package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; @@ -103,15 +103,15 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler { @Inject - public RestUpdateByQueryAction(Settings settings, RestController controller, Client client, + public RestUpdateByQueryAction(Settings settings, RestController controller, IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, Suggesters suggesters, ClusterService clusterService, TransportUpdateByQueryAction action) { - super(settings, client, indicesQueriesRegistry, aggParsers, suggesters, clusterService, action); + super(settings, indicesQueriesRegistry, aggParsers, suggesters, clusterService, action); controller.registerHandler(POST, "/{index}/_update_by_query", this); controller.registerHandler(POST, "/{index}/{type}/_update_by_query", this); } @Override - protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception { + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { handleRequest(request, channel, false, true); } diff --git a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java index d5e0a62ecb5..75ae79d5f7d 100644 --- a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java +++ b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.plugin.example; import 
org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -38,15 +39,14 @@ public class ExampleCatAction extends AbstractCatAction { private final ExamplePluginConfiguration config; @Inject - public ExampleCatAction(Settings settings, RestController controller, - Client client, ExamplePluginConfiguration config) { - super(settings, controller, client); + public ExampleCatAction(Settings settings, RestController controller, ExamplePluginConfiguration config) { + super(settings, controller); this.config = config; controller.registerHandler(GET, "/_cat/configured_example", this); } @Override - protected void doRequest(final RestRequest request, final RestChannel channel, final Client client) { + protected void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { Table table = getTableWithHeader(request); table.startRow(); table.addCell(config.getTestConfig()); From 8b43480b9464c3420ee7aee94e834024fd408293 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Thu, 30 Jun 2016 09:08:31 +0200 Subject: [PATCH 02/36] Tests: Fix vagrant tests to ignore progress bar in assertions --- .../scripts/module_and_plugin_test_cases.bash | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash index c81f84cb778..336fa3ee305 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash @@ -418,17 +418,18 @@ fi @test "[$GROUP] install jvm-example with different logging modes and check output" { local relativePath=${1:-$(readlink -m jvm-example-*.zip)} sudo -E -u 
$ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output - local loglines=$(cat /tmp/plugin-cli-output | wc -l) + # exclude progress line + local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) if [ "$GROUP" == "TAR PLUGINS" ]; then # tar extraction does not create the plugins directory so the plugin tool will print an additional line that the directory will be created [ "$loglines" -eq "3" ] || { - echo "Expected 3 lines but the output was:" + echo "Expected 3 lines excluding progress bar but the output had $loglines lines and was:" cat /tmp/plugin-cli-output false } else [ "$loglines" -eq "2" ] || { - echo "Expected 2 lines but the output was:" + echo "Expected 2 lines excluding progress bar but the output had $loglines lines and was:" cat /tmp/plugin-cli-output false } @@ -437,16 +438,16 @@ fi local relativePath=${1:-$(readlink -m jvm-example-*.zip)} sudo -E -u $ESPLUGIN_COMMAND_USER ES_JAVA_OPTS="-Des.logger.level=DEBUG" "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output - local loglines=$(cat /tmp/plugin-cli-output | wc -l) + local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) if [ "$GROUP" == "TAR PLUGINS" ]; then [ "$loglines" -gt "3" ] || { - echo "Expected more than 3 lines but the output was:" + echo "Expected more than 3 lines excluding progress bar but the output had $loglines lines and was:" cat /tmp/plugin-cli-output false } else [ "$loglines" -gt "2" ] || { - echo "Expected more than 2 lines but the output was:" + echo "Expected more than 2 lines excluding progress bar but the output had $loglines lines and was:" cat /tmp/plugin-cli-output false } From 299c6fcc63d6172c6d50684840943abeda51c06b Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 30 Jun 2016 11:05:54 +0200 Subject: [PATCH 03/36] test: use the reader from the searcher (newSearcher(...) 
method may change the reader) instead of the reader we create in the test Closes #19151 --- .../test/java/org/elasticsearch/common/lucene/LuceneTests.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index f4260626160..33a0b855a7a 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -357,7 +357,6 @@ public class LuceneTests extends ESTestCase { dir.close(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/19151") public void testAsSequentialAccessBits() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new KeywordAnalyzer())); @@ -378,7 +377,7 @@ public class LuceneTests extends ESTestCase { IndexSearcher searcher = newSearcher(reader); Weight termWeight = new TermQuery(new Term("foo", "bar")).createWeight(searcher, false); assertEquals(1, reader.leaves().size()); - LeafReaderContext leafReaderContext = reader.leaves().get(0); + LeafReaderContext leafReaderContext = searcher.getIndexReader().leaves().get(0); Bits bits = Lucene.asSequentialAccessBits(leafReaderContext.reader().maxDoc(), termWeight.scorer(leafReaderContext)); expectThrows(IndexOutOfBoundsException.class, () -> bits.get(-1)); From 66e3b15d21e183d652cce79c38301feedf7eb0a3 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 11:12:20 +0200 Subject: [PATCH 04/36] Fix NPE when GCE region is empty When GCE region is empty we get back from the API something like: ``` { "id": "dummy" } ``` instead of: ``` { "id": "dummy", "items":[ ] } ``` This generates a NPE when we aggregate all the lists into a single one. Closes #16967. 
--- .../cloud/gce/GceComputeServiceImpl.java | 2 +- .../discovery/gce/GceDiscoveryTests.java | 13 +++++++ .../zones/europe-west1-b/instances | 36 +++++++++++++++++++ .../zones/us-central1-a/instances | 3 ++ 4 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/europe-west1-b/instances create mode 100644 plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/us-central1-a/instances diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java index 85e0910736f..b27c9e409fe 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java @@ -97,7 +97,7 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponentemptyList() : instanceList.getItems(); + return instanceList.isEmpty() || instanceList.getItems() == null ? 
Collections.emptyList() : instanceList.getItems(); } catch (PrivilegedActionException e) { logger.warn("Problem fetching instance list for zone {}", e, zoneId); logger.debug("Full exception:", e); diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java index 07b2ef774b4..a256b660d5e 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java @@ -254,4 +254,17 @@ public class GceDiscoveryTests extends ESTestCase { assertThat(expected.getMessage(), containsString("one or more gce discovery settings are missing.")); } } + + /** + * For issue https://github.com/elastic/elasticsearch/issues/16967 + */ + public void testEmptyRegion16967() { + Settings nodeSettings = Settings.builder() + .put(GceComputeService.PROJECT_SETTING.getKey(), projectName) + .putArray(GceComputeService.ZONE_SETTING.getKey(), "europe-west1-b", "us-central1-a") + .build(); + mock = new GceComputeServiceMock(nodeSettings, networkService); + List discoveryNodes = buildDynamicNodes(mock, nodeSettings); + assertThat(discoveryNodes, hasSize(1)); + } } diff --git a/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/europe-west1-b/instances b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/europe-west1-b/instances new file mode 100644 index 00000000000..049e0e1e1b1 --- /dev/null +++ b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/europe-west1-b/instances @@ -0,0 +1,36 @@ +{ + "id": "dummy", + "items":[ + { + "description": "ES Node 1", + "id": "9309873766428965105", + "kind": "compute#instance", + "machineType": "n1-standard-1", 
+ "name": "test1", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "kind": "compute#accessConfig", + "name": "External NAT", + "natIP": "104.155.13.147", + "type": "ONE_TO_ONE_NAT" + } + ], + "name": "nic0", + "network": "default", + "networkIP": "10.240.79.59" + } + ], + "status": "RUNNING", + "tags": { + "fingerprint": "xA6QJb-rGtg=", + "items": [ + "elasticsearch", + "dev" + ] + }, + "zone": "europe-west1-b" + } + ] +} diff --git a/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/us-central1-a/instances b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/us-central1-a/instances new file mode 100644 index 00000000000..989b7507fe8 --- /dev/null +++ b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/us-central1-a/instances @@ -0,0 +1,3 @@ +{ + "id": "dummy" +} From f9d22b3598d61ddf123e185cc384c5540cd8a213 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 11:32:39 +0200 Subject: [PATCH 05/36] Add more javadoc and rename test --- .../org/elasticsearch/discovery/gce/GceDiscoveryTests.java | 6 ++++-- .../zones/europe-west1-b/instances | 0 .../zones/us-central1-a/instances | 0 3 files changed, 4 insertions(+), 2 deletions(-) rename plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/{emptyregion16967 => noregionreturnsemptylist}/zones/europe-west1-b/instances (100%) rename plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/{emptyregion16967 => noregionreturnsemptylist}/zones/us-central1-a/instances (100%) diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java index a256b660d5e..92eb12a99b2 100644 --- 
a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java @@ -256,9 +256,11 @@ public class GceDiscoveryTests extends ESTestCase { } /** - * For issue https://github.com/elastic/elasticsearch/issues/16967 + * For issue https://github.com/elastic/elasticsearch/issues/16967: + * When using multiple regions and one of them has no instance at all, this + * was producing a NPE as a result. */ - public void testEmptyRegion16967() { + public void testNoRegionReturnsEmptyList() { Settings nodeSettings = Settings.builder() .put(GceComputeService.PROJECT_SETTING.getKey(), projectName) .putArray(GceComputeService.ZONE_SETTING.getKey(), "europe-west1-b", "us-central1-a") diff --git a/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/europe-west1-b/instances b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/noregionreturnsemptylist/zones/europe-west1-b/instances similarity index 100% rename from plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/europe-west1-b/instances rename to plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/noregionreturnsemptylist/zones/europe-west1-b/instances diff --git a/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/us-central1-a/instances b/plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/noregionreturnsemptylist/zones/us-central1-a/instances similarity index 100% rename from plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/emptyregion16967/zones/us-central1-a/instances rename to 
plugins/discovery-gce/src/test/resources/org/elasticsearch/discovery/gce/compute/v1/projects/noregionreturnsemptylist/zones/us-central1-a/instances From e4f265eb3a9cc3a1f160ef06670c4f3bc63a4bc2 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 30 Jun 2016 02:28:26 -0700 Subject: [PATCH 06/36] Ingest: Remove generics from Processor.Factory The factory for ingest processor is generic, but that is only for the return type of the create mehtod. However, the actual consumer of the factories only cares about Processor, so generics are not needed. This change removes the generic type from the factory. It also removes AbstractProcessorFactory which only existed in order pull the optional tag from config. This functionality is moved to the caller of the factories in ConfigurationUtil, and the create method now takes the tag. This allows the covariant return of the implementation to work with tests not needing casts. --- .../ingest/SimulateProcessorResult.java | 4 +- .../ingest/AbstractProcessorFactory.java | 38 ------------------- .../ingest/ConfigurationUtils.java | 6 ++- .../org/elasticsearch/ingest/Processor.java | 7 +++- .../ingest/ProcessorsRegistry.java | 10 ++--- .../org/elasticsearch/node/NodeModule.java | 2 +- .../ingest/ConfigurationUtilsTests.java | 2 +- .../ingest/PipelineFactoryTests.java | 2 +- .../ingest/PipelineStoreTests.java | 4 +- .../common/AbstractStringProcessor.java | 8 ++-- .../ingest/common/AppendProcessor.java | 7 ++-- .../ingest/common/ConvertProcessor.java | 6 +-- .../ingest/common/DateIndexNameProcessor.java | 6 +-- .../ingest/common/DateProcessor.java | 6 +-- .../ingest/common/FailProcessor.java | 6 +-- .../ingest/common/ForEachProcessor.java | 5 +-- .../ingest/common/GrokProcessor.java | 6 +-- .../ingest/common/GsubProcessor.java | 6 +-- .../ingest/common/JoinProcessor.java | 6 +-- .../ingest/common/LowercaseProcessor.java | 2 +- .../ingest/common/RemoveProcessor.java | 6 +-- .../ingest/common/RenameProcessor.java | 6 +-- 
.../ingest/common/ScriptProcessor.java | 6 +-- .../ingest/common/SetProcessor.java | 6 +-- .../ingest/common/SortProcessor.java | 6 +-- .../ingest/common/SplitProcessor.java | 6 +-- .../ingest/common/TrimProcessor.java | 2 +- .../ingest/common/UppercaseProcessor.java | 2 +- .../common/AppendProcessorFactoryTests.java | 10 ++--- .../common/ConvertProcessorFactoryTests.java | 13 +++---- .../common/DateIndexNameFactoryTests.java | 14 +++---- .../common/DateProcessorFactoryTests.java | 22 +++++------ .../common/FailProcessorFactoryTests.java | 6 +-- .../common/ForEachProcessorFactoryTests.java | 8 ++-- .../common/GrokProcessorFactoryTests.java | 16 ++++---- .../common/GsubProcessorFactoryTests.java | 12 +++--- .../common/JoinProcessorFactoryTests.java | 8 ++-- .../LowercaseProcessorFactoryTests.java | 6 +-- .../common/RemoveProcessorFactoryTests.java | 6 +-- .../common/RenameProcessorFactoryTests.java | 8 ++-- .../common/ScriptProcessorFactoryTests.java | 4 +- .../common/SetProcessorFactoryTests.java | 13 +++---- .../common/SplitProcessorFactoryTests.java | 8 ++-- .../ingest/common/SplitProcessorTests.java | 2 +- .../common/TrimProcessorFactoryTests.java | 6 +-- .../UppercaseProcessorFactoryTests.java | 6 +-- .../attachment/AttachmentProcessor.java | 6 +-- .../AttachmentProcessorFactoryTests.java | 16 ++++---- .../ingest/geoip/GeoIpProcessor.java | 6 +-- .../geoip/GeoIpProcessorFactoryTests.java | 23 +++++------ .../ingest/IngestTestPlugin.java | 2 +- .../elasticsearch/ingest/TestProcessor.java | 4 +- 52 files changed, 167 insertions(+), 237 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/ingest/AbstractProcessorFactory.java diff --git a/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java b/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java index 6924550d79b..ce98767bcc8 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java +++ 
b/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.ingest.AbstractProcessorFactory; +import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; import java.io.IOException; @@ -91,7 +91,7 @@ public class SimulateProcessorResult implements Writeable, ToXContent { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); if (processorTag != null) { - builder.field(AbstractProcessorFactory.TAG_KEY, processorTag); + builder.field(ConfigurationUtils.TAG_KEY, processorTag); } if (failure == null) { ingestDocument.toXContent(builder, params); diff --git a/core/src/main/java/org/elasticsearch/ingest/AbstractProcessorFactory.java b/core/src/main/java/org/elasticsearch/ingest/AbstractProcessorFactory.java deleted file mode 100644 index 0fb50228793..00000000000 --- a/core/src/main/java/org/elasticsearch/ingest/AbstractProcessorFactory.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.ingest; - -import java.util.Map; - -/** - * A processor implementation may modify the data belonging to a document. - * Whether changes are made and what exactly is modified is up to the implementation. - */ -public abstract class AbstractProcessorFactory

implements Processor.Factory

{ - public static final String TAG_KEY = "tag"; - - @Override - public P create(Map config) throws Exception { - String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY); - return doCreate(tag, config); - } - - protected abstract P doCreate(String tag, Map config) throws Exception; -} diff --git a/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java b/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java index 623c4f315a4..54fbc4a0237 100644 --- a/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java +++ b/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java @@ -29,6 +29,8 @@ import java.util.Map; public final class ConfigurationUtils { + public static final String TAG_KEY = "tag"; + private ConfigurationUtils() { } @@ -255,8 +257,8 @@ public final class ConfigurationUtils { ConfigurationUtils.readOptionalList(null, null, config, Pipeline.ON_FAILURE_KEY); List onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry); - Processor processor; - processor = factory.create(config); + String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY); + Processor processor = factory.create(tag, config); if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) { throw newConfigurationException(processor.getType(), processor.getTag(), Pipeline.ON_FAILURE_KEY, diff --git a/core/src/main/java/org/elasticsearch/ingest/Processor.java b/core/src/main/java/org/elasticsearch/ingest/Processor.java index 92c18464e94..d4a5462c1c9 100644 --- a/core/src/main/java/org/elasticsearch/ingest/Processor.java +++ b/core/src/main/java/org/elasticsearch/ingest/Processor.java @@ -45,14 +45,17 @@ public interface Processor { /** * A factory that knows how to construct a processor based on a map of maps. */ - interface Factory

{ + interface Factory { /** * Creates a processor based on the specified map of maps config. * + * @param tag The tag for the processor + * @param config Configuration for the processor to create + * * Implementations are responsible for removing the used keys, so that after creating a pipeline ingest can * verify if all configurations settings have been used. */ - P create(Map config) throws Exception; + Processor create(String tag, Map config) throws Exception; } } diff --git a/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java b/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java index dd16898aea2..461247b6d09 100644 --- a/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java +++ b/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java @@ -37,12 +37,12 @@ public final class ProcessorsRegistry { private final ClusterService clusterService; private ProcessorsRegistry(ScriptService scriptService, ClusterService clusterService, - Map>> providers) { + Map> providers) { this.templateService = new InternalTemplateService(scriptService); this.scriptService = scriptService; this.clusterService = clusterService; Map processorFactories = new HashMap<>(); - for (Map.Entry>> entry : providers.entrySet()) { + for (Map.Entry> entry : providers.entrySet()) { processorFactories.put(entry.getKey(), entry.getValue().apply(this)); } this.processorFactories = Collections.unmodifiableMap(processorFactories); @@ -71,13 +71,13 @@ public final class ProcessorsRegistry { public static final class Builder { - private final Map>> providers = new HashMap<>(); + private final Map> providers = new HashMap<>(); /** * Adds a processor factory under a specific name. 
*/ - public void registerProcessor(String name, Function> provider) { - Function> previous = this.providers.putIfAbsent(name, provider); + public void registerProcessor(String name, Function provider) { + Function previous = this.providers.putIfAbsent(name, provider); if (previous != null) { throw new IllegalArgumentException("Processor factory already registered for name [" + name + "]"); } diff --git a/core/src/main/java/org/elasticsearch/node/NodeModule.java b/core/src/main/java/org/elasticsearch/node/NodeModule.java index 6d8be732dd2..49de57a87b6 100644 --- a/core/src/main/java/org/elasticsearch/node/NodeModule.java +++ b/core/src/main/java/org/elasticsearch/node/NodeModule.java @@ -62,7 +62,7 @@ public class NodeModule extends AbstractModule { /** * Adds a processor factory under a specific type name. */ - public void registerProcessor(String type, Function> provider) { + public void registerProcessor(String type, Function provider) { processorsRegistryBuilder.registerProcessor(type, provider); } } diff --git a/core/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java b/core/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java index fa78d5aa16c..82886628c85 100644 --- a/core/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java @@ -98,7 +98,7 @@ public class ConfigurationUtilsTests extends ESTestCase { public void testReadProcessors() throws Exception { Processor processor = mock(Processor.class); ProcessorsRegistry.Builder builder = new ProcessorsRegistry.Builder(); - builder.registerProcessor("test_processor", (registry) -> config -> processor); + builder.registerProcessor("test_processor", (registry) -> (tag, config) -> processor); ProcessorsRegistry registry = builder.build(mock(ScriptService.class), mock(ClusterService.class)); diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java 
b/core/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java index cb7bd849a47..8fe7ddd84cb 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java @@ -40,7 +40,7 @@ public class PipelineFactoryTests extends ESTestCase { public void testCreate() throws Exception { Map processorConfig0 = new HashMap<>(); Map processorConfig1 = new HashMap<>(); - processorConfig0.put(AbstractProcessorFactory.TAG_KEY, "first-processor"); + processorConfig0.put(ConfigurationUtils.TAG_KEY, "first-processor"); Map pipelineConfig = new HashMap<>(); pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.PROCESSORS_KEY, diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java index 55ea4360ece..ec41eda47b0 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java @@ -58,7 +58,7 @@ public class PipelineStoreTests extends ESTestCase { public void init() throws Exception { store = new PipelineStore(Settings.EMPTY); ProcessorsRegistry.Builder registryBuilder = new ProcessorsRegistry.Builder(); - registryBuilder.registerProcessor("set", (registry) -> config -> { + registryBuilder.registerProcessor("set", (registry) -> (tag, config) -> { String field = (String) config.remove("field"); String value = (String) config.remove("value"); return new Processor() { @@ -78,7 +78,7 @@ public class PipelineStoreTests extends ESTestCase { } }; }); - registryBuilder.registerProcessor("remove", (registry) -> config -> { + registryBuilder.registerProcessor("remove", (registry) -> (tag, config) -> { String field = (String) config.remove("field"); return new Processor() { @Override diff --git 
a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java index d9ffd34cac1..f35f3fec534 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java @@ -20,9 +20,9 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import java.util.Map; @@ -53,7 +53,7 @@ abstract class AbstractStringProcessor extends AbstractProcessor { protected abstract String process(String value); - static abstract class Factory extends AbstractProcessorFactory { + static abstract class Factory implements Processor.Factory { protected final String processorType; protected Factory(String processorType) { @@ -61,11 +61,11 @@ abstract class AbstractStringProcessor extends AbstractProcessor { } @Override - public T doCreate(String processorTag, Map config) throws Exception { + public AbstractStringProcessor create(String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(processorType, processorTag, config, "field"); return newProcessor(processorTag, field); } - protected abstract T newProcessor(String processorTag, String field); + protected abstract AbstractStringProcessor newProcessor(String processorTag, String field); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java index af163c3c187..26b0e66a63c 100644 --- 
a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java @@ -20,9 +20,10 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; +import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.TemplateService; import org.elasticsearch.ingest.ValueSource; @@ -64,7 +65,7 @@ public final class AppendProcessor extends AbstractProcessor { return TYPE; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { private final TemplateService templateService; @@ -73,7 +74,7 @@ public final class AppendProcessor extends AbstractProcessor { } @Override - public AppendProcessor doCreate(String processorTag, Map config) throws Exception { + public AppendProcessor create(String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); Object value = ConfigurationUtils.readObject(TYPE, processorTag, config, "value"); return new AppendProcessor(processorTag, templateService.compile(field), ValueSource.wrap(value, templateService)); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java index 015c56c72c3..558bdde96a2 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java @@ -20,9 +20,9 @@ package org.elasticsearch.ingest.common; import 
org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import java.util.ArrayList; import java.util.List; @@ -160,9 +160,9 @@ public final class ConvertProcessor extends AbstractProcessor { return TYPE; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { @Override - public ConvertProcessor doCreate(String processorTag, Map config) throws Exception { + public ConvertProcessor create(String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); String typeProperty = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "type"); String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", field); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java index a94d4d048a8..c750c84c576 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java @@ -21,9 +21,9 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; @@ -120,10 +120,10 @@ public final class DateIndexNameProcessor 
extends AbstractProcessor { return dateFormats; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { @Override - protected DateIndexNameProcessor doCreate(String tag, Map config) throws Exception { + public DateIndexNameProcessor create(String tag, Map config) throws Exception { String localeString = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, "locale"); String timezoneString = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, "timezone"); DateTimeZone timezone = timezoneString == null ? DateTimeZone.UTC : DateTimeZone.forID(timezoneString); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java index b82b9c8b76c..e61ed513114 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java @@ -21,9 +21,9 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.ISODateTimeFormat; @@ -108,10 +108,10 @@ public final class DateProcessor extends AbstractProcessor { return formats; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { @SuppressWarnings("unchecked") - public DateProcessor doCreate(String processorTag, Map config) throws Exception { + public DateProcessor create(String processorTag, Map config) throws Exception { String 
field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", DEFAULT_TARGET_FIELD); String timezoneString = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "timezone"); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/FailProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/FailProcessor.java index 6c434d85d5a..a24322f556f 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/FailProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/FailProcessor.java @@ -20,9 +20,9 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.TemplateService; import java.util.Map; @@ -56,7 +56,7 @@ public final class FailProcessor extends AbstractProcessor { return TYPE; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { private final TemplateService templateService; @@ -65,7 +65,7 @@ public final class FailProcessor extends AbstractProcessor { } @Override - public FailProcessor doCreate(String processorTag, Map config) throws Exception { + public FailProcessor create(String processorTag, Map config) throws Exception { String message = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "message"); return new FailProcessor(processorTag, templateService.compile(message)); } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java 
b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java index b6d14d1b8c5..05be47633ba 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; @@ -83,7 +82,7 @@ public final class ForEachProcessor extends AbstractProcessor { return processors; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { private final ProcessorsRegistry processorRegistry; @@ -92,7 +91,7 @@ public final class ForEachProcessor extends AbstractProcessor { } @Override - protected ForEachProcessor doCreate(String tag, Map config) throws Exception { + public ForEachProcessor create(String tag, Map config) throws Exception { String field = readStringProperty(TYPE, tag, config, "field"); List>> processorConfigs = readList(TYPE, tag, config, "processors"); List processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, processorRegistry); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java index aa9fbb905cc..32edc665d58 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java @@ -20,9 +20,9 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import 
org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import java.util.HashMap; import java.util.List; @@ -114,7 +114,7 @@ public final class GrokProcessor extends AbstractProcessor { return combinedPattern; } - public final static class Factory extends AbstractProcessorFactory { + public final static class Factory implements Processor.Factory { private final Map builtinPatterns; @@ -123,7 +123,7 @@ public final class GrokProcessor extends AbstractProcessor { } @Override - public GrokProcessor doCreate(String processorTag, Map config) throws Exception { + public GrokProcessor create(String processorTag, Map config) throws Exception { String matchField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); List matchPatterns = ConfigurationUtils.readList(TYPE, processorTag, config, "patterns"); boolean traceMatch = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "trace_match", false); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java index 72bc9e76710..1dfc566670f 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java @@ -20,8 +20,8 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import java.util.Map; import java.util.regex.Matcher; @@ -78,9 +78,9 @@ public final class GsubProcessor extends AbstractProcessor { return TYPE; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { @Override - 
public GsubProcessor doCreate(String processorTag, Map config) throws Exception { + public GsubProcessor create(String processorTag, Map config) throws Exception { String field = readStringProperty(TYPE, processorTag, config, "field"); String pattern = readStringProperty(TYPE, processorTag, config, "pattern"); String replacement = readStringProperty(TYPE, processorTag, config, "replacement"); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JoinProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JoinProcessor.java index 8114d20f28f..f8bc2afffdd 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JoinProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JoinProcessor.java @@ -20,9 +20,9 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import java.util.List; import java.util.Map; @@ -70,9 +70,9 @@ public final class JoinProcessor extends AbstractProcessor { return TYPE; } - public final static class Factory extends AbstractProcessorFactory { + public final static class Factory implements Processor.Factory { @Override - public JoinProcessor doCreate(String processorTag, Map config) throws Exception { + public JoinProcessor create(String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); String separator = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "separator"); return new JoinProcessor(processorTag, field, separator); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java 
b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java index 9f8ea7a5614..e7a8f3f3e6a 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java @@ -44,7 +44,7 @@ public final class LowercaseProcessor extends AbstractStringProcessor { return TYPE; } - public final static class Factory extends AbstractStringProcessor.Factory { + public final static class Factory extends AbstractStringProcessor.Factory { public Factory() { super(TYPE); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java index 98c4e18a408..a7de33a7e2c 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java @@ -20,9 +20,9 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.TemplateService; import java.util.Map; @@ -55,7 +55,7 @@ public final class RemoveProcessor extends AbstractProcessor { return TYPE; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { private final TemplateService templateService; @@ -64,7 +64,7 @@ public final class RemoveProcessor extends AbstractProcessor { } @Override - public RemoveProcessor doCreate(String processorTag, Map config) throws Exception { + public RemoveProcessor create(String processorTag, Map config) throws Exception { String field = 
ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); return new RemoveProcessor(processorTag, templateService.compile(field)); } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java index 9143321c4aa..ae81291b644 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java @@ -20,9 +20,9 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import java.util.Map; @@ -75,9 +75,9 @@ public final class RenameProcessor extends AbstractProcessor { return TYPE; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { @Override - public RenameProcessor doCreate(String processorTag, Map config) throws Exception { + public RenameProcessor create(String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field"); return new RenameProcessor(processorTag, field, targetField); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java index e4881366165..4733c1f5866 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java 
@@ -24,8 +24,8 @@ import java.util.Map; import org.elasticsearch.common.Strings; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; @@ -77,7 +77,7 @@ public final class ScriptProcessor extends AbstractProcessor { return TYPE; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { private final ScriptService scriptService; @@ -86,7 +86,7 @@ public final class ScriptProcessor extends AbstractProcessor { } @Override - public ScriptProcessor doCreate(String processorTag, Map config) throws Exception { + public ScriptProcessor create(String processorTag, Map config) throws Exception { String field = readOptionalStringProperty(TYPE, processorTag, config, "field"); String lang = readStringProperty(TYPE, processorTag, config, "lang"); String inline = readOptionalStringProperty(TYPE, processorTag, config, "inline"); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java index a78701645a9..ce328e34cb7 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java @@ -20,9 +20,9 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.TemplateService; import 
org.elasticsearch.ingest.ValueSource; @@ -75,7 +75,7 @@ public final class SetProcessor extends AbstractProcessor { return TYPE; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { private final TemplateService templateService; @@ -84,7 +84,7 @@ public final class SetProcessor extends AbstractProcessor { } @Override - public SetProcessor doCreate(String processorTag, Map config) throws Exception { + public SetProcessor create(String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); Object value = ConfigurationUtils.readObject(TYPE, processorTag, config, "value"); boolean overrideEnabled = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "override", true); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java index 706a1cef9c5..5ab4016921d 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java @@ -20,9 +20,9 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import java.util.Collections; import java.util.List; @@ -111,10 +111,10 @@ public final class SortProcessor extends AbstractProcessor { return TYPE; } - public final static class Factory extends AbstractProcessorFactory { + public final static class Factory implements Processor.Factory { @Override - public SortProcessor doCreate(String processorTag, Map config) throws Exception { + public SortProcessor 
create(String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, FIELD); try { SortOrder direction = SortOrder.fromString( diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SplitProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SplitProcessor.java index f7c5e8befc4..87526f62297 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SplitProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SplitProcessor.java @@ -20,9 +20,9 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import java.util.ArrayList; import java.util.Collections; @@ -72,9 +72,9 @@ public final class SplitProcessor extends AbstractProcessor { return TYPE; } - public static class Factory extends AbstractProcessorFactory { + public static class Factory implements Processor.Factory { @Override - public SplitProcessor doCreate(String processorTag, Map config) throws Exception { + public SplitProcessor create(String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); return new SplitProcessor(processorTag, field, ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "separator")); } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java index a57a25125d6..e852f887da0 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java +++ 
b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java @@ -41,7 +41,7 @@ public final class TrimProcessor extends AbstractStringProcessor { return TYPE; } - public static final class Factory extends AbstractStringProcessor.Factory { + public static final class Factory extends AbstractStringProcessor.Factory { public Factory() { super(TYPE); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java index a5c817352a1..5585a130eaf 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java @@ -43,7 +43,7 @@ public final class UppercaseProcessor extends AbstractStringProcessor { return TYPE; } - public static final class Factory extends AbstractStringProcessor.Factory { + public static final class Factory extends AbstractStringProcessor.Factory { public Factory() { super(TYPE); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java index fbf77cc4285..b49a44cc04c 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -52,8 +51,7 @@ public class AppendProcessorFactoryTests extends ESTestCase { } config.put("value", value); String processorTag = 
randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - AppendProcessor appendProcessor = factory.create(config); + AppendProcessor appendProcessor = factory.create(processorTag, config); assertThat(appendProcessor.getTag(), equalTo(processorTag)); assertThat(appendProcessor.getField().execute(Collections.emptyMap()), equalTo("field1")); assertThat(appendProcessor.getValue().copyAndResolve(Collections.emptyMap()), equalTo(value)); @@ -63,7 +61,7 @@ public class AppendProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("value", "value1"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); @@ -74,7 +72,7 @@ public class AppendProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "field1"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[value] required property is missing")); @@ -86,7 +84,7 @@ public class AppendProcessorFactoryTests extends ESTestCase { config.put("field", "field1"); config.put("value", null); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[value] required property is missing")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java index 1ec5362af14..7dd8bbf97e2 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java +++ 
b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -39,8 +38,7 @@ public class ConvertProcessorFactoryTests extends ESTestCase { config.put("field", "field1"); config.put("type", type.toString()); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - ConvertProcessor convertProcessor = factory.create(config); + ConvertProcessor convertProcessor = factory.create(processorTag, config); assertThat(convertProcessor.getTag(), equalTo(processorTag)); assertThat(convertProcessor.getField(), equalTo("field1")); assertThat(convertProcessor.getTargetField(), equalTo("field1")); @@ -54,7 +52,7 @@ public class ConvertProcessorFactoryTests extends ESTestCase { config.put("field", "field1"); config.put("type", type); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), Matchers.equalTo("[type] type [" + type + "] not supported, cannot convert field.")); @@ -70,7 +68,7 @@ public class ConvertProcessorFactoryTests extends ESTestCase { String type = "type-" + randomAsciiOfLengthBetween(1, 10); config.put("type", type); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), Matchers.equalTo("[field] required property is missing")); @@ -82,7 +80,7 @@ public class ConvertProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "field1"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch 
(ElasticsearchParseException e) { assertThat(e.getMessage(), Matchers.equalTo("[type] required property is missing")); @@ -97,8 +95,7 @@ public class ConvertProcessorFactoryTests extends ESTestCase { config.put("target_field", "field2"); config.put("type", type.toString()); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - ConvertProcessor convertProcessor = factory.create(config); + ConvertProcessor convertProcessor = factory.create(processorTag, config); assertThat(convertProcessor.getTag(), equalTo(processorTag)); assertThat(convertProcessor.getField(), equalTo("field1")); assertThat(convertProcessor.getTargetField(), equalTo("field2")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java index 42877236b88..cc272d0b120 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java @@ -36,7 +36,7 @@ public class DateIndexNameFactoryTests extends ESTestCase { config.put("field", "_field"); config.put("date_rounding", "y"); - DateIndexNameProcessor processor = factory.create(config); + DateIndexNameProcessor processor = factory.create(null, config); assertThat(processor.getDateFormats().size(), Matchers.equalTo(1)); assertThat(processor.getField(), Matchers.equalTo("_field")); assertThat(processor.getIndexNamePrefix(), Matchers.equalTo("")); @@ -53,7 +53,7 @@ public class DateIndexNameFactoryTests extends ESTestCase { config.put("date_rounding", "y"); config.put("date_formats", Arrays.asList("UNIX", "UNIX_MS")); - DateIndexNameProcessor processor = factory.create(config); + DateIndexNameProcessor processor = factory.create(null, config); assertThat(processor.getDateFormats().size(), 
Matchers.equalTo(2)); config = new HashMap<>(); @@ -62,7 +62,7 @@ public class DateIndexNameFactoryTests extends ESTestCase { config.put("date_rounding", "y"); config.put("index_name_format", "yyyyMMdd"); - processor = factory.create(config); + processor = factory.create(null, config); assertThat(processor.getIndexNameFormat(), Matchers.equalTo("yyyyMMdd")); config = new HashMap<>(); @@ -71,7 +71,7 @@ public class DateIndexNameFactoryTests extends ESTestCase { config.put("date_rounding", "y"); config.put("timezone", "+02:00"); - processor = factory.create(config); + processor = factory.create(null, config); assertThat(processor.getTimezone(), Matchers.equalTo(DateTimeZone.forOffsetHours(2))); config = new HashMap<>(); @@ -79,7 +79,7 @@ public class DateIndexNameFactoryTests extends ESTestCase { config.put("index_name_prefix", "_prefix"); config.put("date_rounding", "y"); - processor = factory.create(config); + processor = factory.create(null, config); assertThat(processor.getIndexNamePrefix(), Matchers.equalTo("_prefix")); } @@ -87,12 +87,12 @@ public class DateIndexNameFactoryTests extends ESTestCase { DateIndexNameProcessor.Factory factory = new DateIndexNameProcessor.Factory(); Map config = new HashMap<>(); config.put("date_rounding", "y"); - ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(config)); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, config)); assertThat(e.getMessage(), Matchers.equalTo("[field] required property is missing")); config.clear(); config.put("field", "_field"); - e = expectThrows(ElasticsearchParseException.class, () -> factory.create(config)); + e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, config)); assertThat(e.getMessage(), Matchers.equalTo("[date_rounding] required property is missing")); } diff --git 
a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java index 65dcdf6082c..9c5352c7ee4 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.test.ESTestCase; import org.joda.time.DateTimeZone; @@ -42,8 +41,7 @@ public class DateProcessorFactoryTests extends ESTestCase { config.put("field", sourceField); config.put("formats", Collections.singletonList("dd/MM/yyyyy")); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - DateProcessor processor = factory.create(config); + DateProcessor processor = factory.create(processorTag, config); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), equalTo(sourceField)); assertThat(processor.getTargetField(), equalTo(DateProcessor.DEFAULT_TARGET_FIELD)); @@ -60,7 +58,7 @@ public class DateProcessorFactoryTests extends ESTestCase { config.put("formats", Collections.singletonList("dd/MM/yyyyy")); try { - factory.create(config); + factory.create(null, config); fail("processor creation should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), containsString("[field] required property is missing")); @@ -76,7 +74,7 @@ public class DateProcessorFactoryTests extends ESTestCase { config.put("target_field", targetField); try { - factory.create(config); + factory.create(null, config); fail("processor creation should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), 
containsString("[formats] required property is missing")); @@ -92,7 +90,7 @@ public class DateProcessorFactoryTests extends ESTestCase { Locale locale = randomLocale(random()); config.put("locale", locale.toLanguageTag()); - DateProcessor processor = factory.create(config); + DateProcessor processor = factory.create(null, config); assertThat(processor.getLocale().toLanguageTag(), equalTo(locale.toLanguageTag())); } @@ -104,7 +102,7 @@ public class DateProcessorFactoryTests extends ESTestCase { config.put("formats", Collections.singletonList("dd/MM/yyyyy")); config.put("locale", "invalid_locale"); try { - factory.create(config); + factory.create(null, config); fail("should fail with invalid locale"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("Invalid language tag specified: invalid_locale")); @@ -120,7 +118,7 @@ public class DateProcessorFactoryTests extends ESTestCase { DateTimeZone timezone = randomDateTimeZone(); config.put("timezone", timezone.getID()); - DateProcessor processor = factory.create(config); + DateProcessor processor = factory.create(null, config); assertThat(processor.getTimezone(), equalTo(timezone)); } @@ -132,7 +130,7 @@ public class DateProcessorFactoryTests extends ESTestCase { config.put("match_formats", Collections.singletonList("dd/MM/yyyyy")); config.put("timezone", "invalid_timezone"); try { - factory.create(config); + factory.create(null, config); fail("invalid timezone should fail"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("The datetime zone id 'invalid_timezone' is not recognised")); @@ -146,7 +144,7 @@ public class DateProcessorFactoryTests extends ESTestCase { config.put("field", sourceField); config.put("formats", Arrays.asList("dd/MM/yyyy", "dd-MM-yyyy")); - DateProcessor processor = factory.create(config); + DateProcessor processor = factory.create(null, config); assertThat(processor.getFormats(), equalTo(Arrays.asList("dd/MM/yyyy", "dd-MM-yyyy"))); } @@ -158,7 
+156,7 @@ public class DateProcessorFactoryTests extends ESTestCase { config.put("formats", "dd/MM/yyyy"); try { - factory.create(config); + factory.create(null, config); fail("processor creation should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), containsString("[formats] property isn't a list, but of type [java.lang.String]")); @@ -174,7 +172,7 @@ public class DateProcessorFactoryTests extends ESTestCase { config.put("target_field", targetField); config.put("formats", Arrays.asList("dd/MM/yyyy", "dd-MM-yyyy")); - DateProcessor processor = factory.create(config); + DateProcessor processor = factory.create(null, config); assertThat(processor.getTargetField(), equalTo(targetField)); } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FailProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FailProcessorFactoryTests.java index db16b78b316..a385a58ef50 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FailProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FailProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -44,8 +43,7 @@ public class FailProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("message", "error"); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - FailProcessor failProcessor = factory.create(config); + FailProcessor failProcessor = factory.create(processorTag, config); assertThat(failProcessor.getTag(), equalTo(processorTag)); assertThat(failProcessor.getMessage().execute(Collections.emptyMap()), 
equalTo("error")); } @@ -53,7 +51,7 @@ public class FailProcessorFactoryTests extends ESTestCase { public void testCreateMissingMessageField() throws Exception { Map config = new HashMap<>(); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[message] required property is missing")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java index d45e98ab06a..0bbad532fb3 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java @@ -38,14 +38,14 @@ public class ForEachProcessorFactoryTests extends ESTestCase { public void testCreate() throws Exception { ProcessorsRegistry.Builder builder = new ProcessorsRegistry.Builder(); Processor processor = new TestProcessor(ingestDocument -> {}); - builder.registerProcessor("_name", (registry) -> config -> processor); + builder.registerProcessor("_name", (registry) -> (tag, config) -> processor); ProcessorsRegistry registry = builder.build(mock(ScriptService.class), mock(ClusterService.class)); ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(registry); Map config = new HashMap<>(); config.put("field", "_field"); config.put("processors", Collections.singletonList(Collections.singletonMap("_name", Collections.emptyMap()))); - ForEachProcessor forEachProcessor = forEachFactory.create(config); + ForEachProcessor forEachProcessor = forEachFactory.create(null, config); assertThat(forEachProcessor, Matchers.notNullValue()); assertThat(forEachProcessor.getField(), Matchers.equalTo("_field")); assertThat(forEachProcessor.getProcessors().size(), 
Matchers.equalTo(1)); @@ -54,7 +54,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase { config = new HashMap<>(); config.put("processors", Collections.singletonList(Collections.singletonMap("_name", Collections.emptyMap()))); try { - forEachFactory.create(config); + forEachFactory.create(null, config); fail("exception expected"); } catch (Exception e) { assertThat(e.getMessage(), Matchers.equalTo("[field] required property is missing")); @@ -63,7 +63,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase { config = new HashMap<>(); config.put("field", "_field"); try { - forEachFactory.create(config); + forEachFactory.create(null, config); fail("exception expected"); } catch (Exception e) { assertThat(e.getMessage(), Matchers.equalTo("[processors] required property is missing")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java index a7a133b4363..1287d066420 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.test.ESTestCase; import java.util.Collections; @@ -39,8 +38,7 @@ public class GrokProcessorFactoryTests extends ESTestCase { config.put("field", "_field"); config.put("patterns", Collections.singletonList("(?\\w+)")); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - GrokProcessor processor = factory.create(config); + GrokProcessor processor = factory.create(processorTag, config); assertThat(processor.getTag(), equalTo(processorTag)); 
assertThat(processor.getMatchField(), equalTo("_field")); assertThat(processor.getGrok(), notNullValue()); @@ -50,7 +48,7 @@ public class GrokProcessorFactoryTests extends ESTestCase { GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap()); Map config = new HashMap<>(); config.put("patterns", Collections.singletonList("(?\\w+)")); - ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(config)); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create("tag", config)); assertThat(e.getMessage(), equalTo("[field] required property is missing")); } @@ -58,7 +56,7 @@ public class GrokProcessorFactoryTests extends ESTestCase { GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap()); Map config = new HashMap<>(); config.put("field", "foo"); - ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(config)); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create("tag", config)); assertThat(e.getMessage(), equalTo("[patterns] required property is missing")); } @@ -67,7 +65,7 @@ public class GrokProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "foo"); config.put("patterns", Collections.emptyList()); - ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(config)); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create("tag", config)); assertThat(e.getMessage(), equalTo("[patterns] List of patterns must not be empty")); } @@ -78,7 +76,7 @@ public class GrokProcessorFactoryTests extends ESTestCase { config.put("field", "_field"); config.put("patterns", Collections.singletonList("%{MY_PATTERN:name}!")); config.put("pattern_definitions", Collections.singletonMap("MY_PATTERN", "foo")); - GrokProcessor 
processor = factory.create(config); + GrokProcessor processor = factory.create(null, config); assertThat(processor.getMatchField(), equalTo("_field")); assertThat(processor.getGrok(), notNullValue()); assertThat(processor.getGrok().match("foo!"), equalTo(true)); @@ -89,7 +87,7 @@ public class GrokProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "_field"); config.put("patterns", Collections.singletonList("[")); - ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(config)); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create("tag", config)); assertThat(e.getMessage(), equalTo("[patterns] Invalid regex pattern found in: [[]. premature end of char-class")); } @@ -99,7 +97,7 @@ public class GrokProcessorFactoryTests extends ESTestCase { config.put("field", "_field"); config.put("patterns", Collections.singletonList("%{MY_PATTERN:name}!")); config.put("pattern_definitions", Collections.singletonMap("MY_PATTERN", "[")); - ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(config)); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create("tag", config)); assertThat(e.getMessage(), equalTo("[patterns] Invalid regex pattern found in: [%{MY_PATTERN:name}!]. 
premature end of char-class")); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java index 60cceb34024..0086457f857 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -38,8 +37,7 @@ public class GsubProcessorFactoryTests extends ESTestCase { config.put("pattern", "\\."); config.put("replacement", "-"); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - GsubProcessor gsubProcessor = factory.create(config); + GsubProcessor gsubProcessor = factory.create(processorTag, config); assertThat(gsubProcessor.getTag(), equalTo(processorTag)); assertThat(gsubProcessor.getField(), equalTo("field1")); assertThat(gsubProcessor.getPattern().toString(), equalTo("\\.")); @@ -52,7 +50,7 @@ public class GsubProcessorFactoryTests extends ESTestCase { config.put("pattern", "\\."); config.put("replacement", "-"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); @@ -65,7 +63,7 @@ public class GsubProcessorFactoryTests extends ESTestCase { config.put("field", "field1"); config.put("replacement", "-"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[pattern] 
required property is missing")); @@ -78,7 +76,7 @@ public class GsubProcessorFactoryTests extends ESTestCase { config.put("field", "field1"); config.put("pattern", "\\."); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[replacement] required property is missing")); @@ -92,7 +90,7 @@ public class GsubProcessorFactoryTests extends ESTestCase { config.put("pattern", "["); config.put("replacement", "-"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), containsString("[pattern] Invalid regex pattern. Unclosed character class")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JoinProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JoinProcessorFactoryTests.java index 970fd8b8b9a..b2386c17a2b 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JoinProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JoinProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -36,8 +35,7 @@ public class JoinProcessorFactoryTests extends ESTestCase { config.put("field", "field1"); config.put("separator", "-"); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - JoinProcessor joinProcessor = factory.create(config); + JoinProcessor joinProcessor = factory.create(processorTag, config); assertThat(joinProcessor.getTag(), equalTo(processorTag)); assertThat(joinProcessor.getField(), equalTo("field1")); 
assertThat(joinProcessor.getSeparator(), equalTo("-")); @@ -48,7 +46,7 @@ public class JoinProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("separator", "-"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); @@ -60,7 +58,7 @@ public class JoinProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "field1"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[separator] required property is missing")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorFactoryTests.java index 4dec115458c..2e0682beb6e 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -35,8 +34,7 @@ public class LowercaseProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "field1"); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - LowercaseProcessor uppercaseProcessor = factory.create(config); + LowercaseProcessor uppercaseProcessor = (LowercaseProcessor)factory.create(processorTag, config); assertThat(uppercaseProcessor.getTag(), equalTo(processorTag)); 
assertThat(uppercaseProcessor.getField(), equalTo("field1")); } @@ -45,7 +43,7 @@ public class LowercaseProcessorFactoryTests extends ESTestCase { LowercaseProcessor.Factory factory = new LowercaseProcessor.Factory(); Map config = new HashMap<>(); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorFactoryTests.java index a5f88103e96..133ddbeebfa 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -44,8 +43,7 @@ public class RemoveProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "field1"); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - RemoveProcessor removeProcessor = factory.create(config); + RemoveProcessor removeProcessor = factory.create(processorTag, config); assertThat(removeProcessor.getTag(), equalTo(processorTag)); assertThat(removeProcessor.getField().execute(Collections.emptyMap()), equalTo("field1")); } @@ -53,7 +51,7 @@ public class RemoveProcessorFactoryTests extends ESTestCase { public void testCreateMissingField() throws Exception { Map config = new HashMap<>(); try { - factory.create(config); + factory.create(null, 
config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java index c078f09dd92..b969a3b6247 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -36,8 +35,7 @@ public class RenameProcessorFactoryTests extends ESTestCase { config.put("field", "old_field"); config.put("target_field", "new_field"); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - RenameProcessor renameProcessor = factory.create(config); + RenameProcessor renameProcessor = factory.create(processorTag, config); assertThat(renameProcessor.getTag(), equalTo(processorTag)); assertThat(renameProcessor.getField(), equalTo("old_field")); assertThat(renameProcessor.getTargetField(), equalTo("new_field")); @@ -48,7 +46,7 @@ public class RenameProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("target_field", "new_field"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); @@ -60,7 +58,7 @@ public class RenameProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "old_field"); try { - 
factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[target_field] required property is missing")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java index ed47894d4d9..79048cd7769 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java @@ -55,7 +55,7 @@ public class ScriptProcessorFactoryTests extends ESTestCase { configMap.put("lang", "mockscript"); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> factory.doCreate(randomAsciiOfLength(10), configMap)); + () -> factory.create(randomAsciiOfLength(10), configMap)); assertThat(exception.getMessage(), is("[null] Only one of [file], [id], or [inline] may be configured")); } @@ -66,7 +66,7 @@ public class ScriptProcessorFactoryTests extends ESTestCase { configMap.put("lang", "mockscript"); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> factory.doCreate(randomAsciiOfLength(10), configMap)); + () -> factory.create(randomAsciiOfLength(10), configMap)); assertThat(exception.getMessage(), is("[null] Need [file], [id], or [inline] parameter to refer to scripts")); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java index b8c97a379cb..cbbf3f40902 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java +++ 
b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -45,8 +44,7 @@ public class SetProcessorFactoryTests extends ESTestCase { config.put("field", "field1"); config.put("value", "value1"); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - SetProcessor setProcessor = factory.create(config); + SetProcessor setProcessor = factory.create(processorTag, config); assertThat(setProcessor.getTag(), equalTo(processorTag)); assertThat(setProcessor.getField().execute(Collections.emptyMap()), equalTo("field1")); assertThat(setProcessor.getValue().copyAndResolve(Collections.emptyMap()), equalTo("value1")); @@ -60,8 +58,7 @@ public class SetProcessorFactoryTests extends ESTestCase { config.put("value", "value1"); config.put("override", overrideEnabled); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - SetProcessor setProcessor = factory.create(config); + SetProcessor setProcessor = factory.create(processorTag, config); assertThat(setProcessor.getTag(), equalTo(processorTag)); assertThat(setProcessor.getField().execute(Collections.emptyMap()), equalTo("field1")); assertThat(setProcessor.getValue().copyAndResolve(Collections.emptyMap()), equalTo("value1")); @@ -72,7 +69,7 @@ public class SetProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("value", "value1"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); @@ -83,7 
+80,7 @@ public class SetProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "field1"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[value] required property is missing")); @@ -95,7 +92,7 @@ public class SetProcessorFactoryTests extends ESTestCase { config.put("field", "field1"); config.put("value", null); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[value] required property is missing")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorFactoryTests.java index c747807b710..4f85d61e629 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -36,8 +35,7 @@ public class SplitProcessorFactoryTests extends ESTestCase { config.put("field", "field1"); config.put("separator", "\\."); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - SplitProcessor splitProcessor = factory.create(config); + SplitProcessor splitProcessor = factory.create(processorTag, config); assertThat(splitProcessor.getTag(), equalTo(processorTag)); assertThat(splitProcessor.getField(), equalTo("field1")); assertThat(splitProcessor.getSeparator(), equalTo("\\.")); @@ -48,7 +46,7 @@ 
public class SplitProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("separator", "\\."); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); @@ -60,7 +58,7 @@ public class SplitProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "field1"); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[separator] required property is missing")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorTests.java index 13d45dc126b..a7e1313a099 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorTests.java @@ -84,7 +84,7 @@ public class SplitProcessorTests extends ESTestCase { Map splitConfig = new HashMap<>(); splitConfig.put("field", "flags"); splitConfig.put("separator", "\\|"); - Processor splitProcessor = (new SplitProcessor.Factory()).create(splitConfig); + Processor splitProcessor = (new SplitProcessor.Factory()).create("tag", splitConfig); Map source = new HashMap<>(); source.put("flags", "new|hot|super|fun|interesting"); IngestDocument ingestDocument = new IngestDocument(source, new HashMap<>()); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorFactoryTests.java index 54904775478..fa9a33f41de 100644 --- 
a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -35,8 +34,7 @@ public class TrimProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "field1"); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - TrimProcessor uppercaseProcessor = factory.create(config); + TrimProcessor uppercaseProcessor = (TrimProcessor)factory.create(processorTag, config); assertThat(uppercaseProcessor.getTag(), equalTo(processorTag)); assertThat(uppercaseProcessor.getField(), equalTo("field1")); } @@ -45,7 +43,7 @@ public class TrimProcessorFactoryTests extends ESTestCase { TrimProcessor.Factory factory = new TrimProcessor.Factory(); Map config = new HashMap<>(); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorFactoryTests.java index cd4d1faf767..91698d4fcc4 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorFactoryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; -import 
org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -35,8 +34,7 @@ public class UppercaseProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "field1"); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - UppercaseProcessor uppercaseProcessor = factory.create(config); + UppercaseProcessor uppercaseProcessor = (UppercaseProcessor)factory.create(processorTag, config); assertThat(uppercaseProcessor.getTag(), equalTo(processorTag)); assertThat(uppercaseProcessor.getField(), equalTo("field1")); } @@ -45,7 +43,7 @@ public class UppercaseProcessorFactoryTests extends ESTestCase { UppercaseProcessor.Factory factory = new UppercaseProcessor.Factory(); Map config = new HashMap<>(); try { - factory.create(config); + factory.create(null, config); fail("factory create should have failed"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[field] required property is missing")); diff --git a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java index 40ebe2592ab..fb4dc37d641 100644 --- a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java +++ b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java @@ -25,8 +25,8 @@ import org.apache.tika.metadata.TikaCoreProperties; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import java.io.IOException; import java.util.Arrays; @@ -150,12 +150,12 @@ public final 
class AttachmentProcessor extends AbstractProcessor { return indexedChars; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { static final Set DEFAULT_PROPERTIES = EnumSet.allOf(Property.class); @Override - public AttachmentProcessor doCreate(String processorTag, Map config) throws Exception { + public AttachmentProcessor create(String processorTag, Map config) throws Exception { String field = readStringProperty(TYPE, processorTag, config, "field"); String targetField = readStringProperty(TYPE, processorTag, config, "target_field", "attachment"); List properyNames = readOptionalList(TYPE, processorTag, config, "properties"); diff --git a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorFactoryTests.java b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorFactoryTests.java index 6bd4e07702e..8d011056854 100644 --- a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorFactoryTests.java +++ b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorFactoryTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.ingest.attachment; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.AbstractProcessorFactory; +import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -46,9 +46,8 @@ public class AttachmentProcessorFactoryTests extends ESTestCase { config.put("field", "_field"); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - AttachmentProcessor processor = factory.create(config); + AttachmentProcessor processor = factory.create(processorTag, config); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), 
equalTo("_field")); assertThat(processor.getTargetField(), equalTo("attachment")); @@ -62,8 +61,7 @@ public class AttachmentProcessorFactoryTests extends ESTestCase { config.put("indexed_chars", indexedChars); String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - AttachmentProcessor processor = factory.create(config); + AttachmentProcessor processor = factory.create(processorTag, config); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getIndexedChars(), is(indexedChars)); } @@ -72,7 +70,7 @@ public class AttachmentProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "_field"); config.put("target_field", "_field"); - AttachmentProcessor processor = factory.create(config); + AttachmentProcessor processor = factory.create(null, config); assertThat(processor.getField(), equalTo("_field")); assertThat(processor.getTargetField(), equalTo("_field")); } @@ -89,7 +87,7 @@ public class AttachmentProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "_field"); config.put("properties", fieldNames); - AttachmentProcessor processor = factory.create(config); + AttachmentProcessor processor = factory.create(null, config); assertThat(processor.getField(), equalTo("_field")); assertThat(processor.getProperties(), equalTo(properties)); } @@ -99,7 +97,7 @@ public class AttachmentProcessorFactoryTests extends ESTestCase { config.put("field", "_field"); config.put("properties", Collections.singletonList("invalid")); try { - factory.create(config); + factory.create(null, config); fail("exception expected"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), containsString("[properties] illegal field option [invalid]")); @@ -113,7 +111,7 @@ public class AttachmentProcessorFactoryTests extends ESTestCase { config.put("field", "_field"); config.put("properties", "invalid"); try { - factory.create(config); + 
factory.create(null, config); fail("exception expected"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[properties] property isn't a list, but of type [java.lang.String]")); diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index 0b18cae25e7..92c4785dbaf 100644 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -34,8 +34,8 @@ import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import java.io.Closeable; import java.io.IOException; @@ -217,7 +217,7 @@ public final class GeoIpProcessor extends AbstractProcessor { return geoData; } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { static final Set DEFAULT_CITY_PROPERTIES = EnumSet.of( Property.CONTINENT_NAME, Property.COUNTRY_ISO_CODE, Property.REGION_NAME, Property.CITY_NAME, Property.LOCATION @@ -231,7 +231,7 @@ public final class GeoIpProcessor extends AbstractProcessor { } @Override - public GeoIpProcessor doCreate(String processorTag, Map config) throws Exception { + public GeoIpProcessor create(String processorTag, Map config) throws Exception { String ipField = readStringProperty(TYPE, processorTag, config, "field"); String targetField = readStringProperty(TYPE, processorTag, config, "target_field", "geoip"); String databaseFile = readStringProperty(TYPE, processorTag, config, "database_file", "GeoLite2-City.mmdb.gz"); diff 
--git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 04729162729..28043171fcd 100644 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -23,7 +23,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.maxmind.geoip2.DatabaseReader; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Randomness; -import org.elasticsearch.ingest.AbstractProcessorFactory; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.StreamsUtils; import org.junit.AfterClass; @@ -74,11 +73,9 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "_field"); - String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - GeoIpProcessor processor = factory.create(config); + GeoIpProcessor processor = factory.create(processorTag, config); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), equalTo("_field")); assertThat(processor.getTargetField(), equalTo("geoip")); @@ -92,11 +89,9 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "_field"); config.put("database_file", "GeoLite2-Country.mmdb.gz"); - String processorTag = randomAsciiOfLength(10); - config.put(AbstractProcessorFactory.TAG_KEY, processorTag); - GeoIpProcessor processor = factory.create(config); + GeoIpProcessor processor = factory.create(processorTag, config); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), equalTo("_field")); assertThat(processor.getTargetField(), equalTo("geoip")); @@ -109,7 +104,7 
@@ public class GeoIpProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "_field"); config.put("target_field", "_field"); - GeoIpProcessor processor = factory.create(config); + GeoIpProcessor processor = factory.create(null, config); assertThat(processor.getField(), equalTo("_field")); assertThat(processor.getTargetField(), equalTo("_field")); } @@ -119,7 +114,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "_field"); config.put("database_file", "GeoLite2-Country.mmdb.gz"); - GeoIpProcessor processor = factory.create(config); + GeoIpProcessor processor = factory.create(null, config); assertThat(processor.getField(), equalTo("_field")); assertThat(processor.getTargetField(), equalTo("geoip")); assertThat(processor.getDbReader().getMetadata().getDatabaseType(), equalTo("GeoLite2-Country")); @@ -135,7 +130,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { String cityProperty = RandomPicks.randomFrom(Randomness.get(), cityOnlyProperties).toString(); config.put("properties", Collections.singletonList(cityProperty)); try { - factory.create(config); + factory.create(null, config); fail("Exception expected"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[properties] illegal property value [" + cityProperty + @@ -150,7 +145,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { config.put("field", "_field"); config.put("database_file", "does-not-exist.mmdb.gz"); try { - factory.create(config); + factory.create(null, config); fail("Exception expected"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[database_file] database file [does-not-exist.mmdb.gz] doesn't exist")); @@ -171,7 +166,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { Map config = new HashMap<>(); config.put("field", "_field"); config.put("properties", fieldNames); - GeoIpProcessor processor = 
factory.create(config); + GeoIpProcessor processor = factory.create(null, config); assertThat(processor.getField(), equalTo("_field")); assertThat(processor.getProperties(), equalTo(properties)); } @@ -183,7 +178,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { config.put("field", "_field"); config.put("properties", Collections.singletonList("invalid")); try { - factory.create(config); + factory.create(null, config); fail("exception expected"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[properties] illegal property value [invalid]. valid values are [IP, COUNTRY_ISO_CODE, " + @@ -194,7 +189,7 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { config.put("field", "_field"); config.put("properties", "invalid"); try { - factory.create(config); + factory.create("tag", config); fail("exception expected"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[properties] property isn't a list, but of type [java.lang.String]")); diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/IngestTestPlugin.java b/test/framework/src/main/java/org/elasticsearch/ingest/IngestTestPlugin.java index b32a2eab991..a9585aa8d41 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/IngestTestPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/IngestTestPlugin.java @@ -28,7 +28,7 @@ import org.elasticsearch.plugins.Plugin; public class IngestTestPlugin extends Plugin { public void onModule(NodeModule nodeModule) { - nodeModule.registerProcessor("test", (registry) -> config -> + nodeModule.registerProcessor("test", (registry) -> (tag, config) -> new TestProcessor("id", "test", doc -> { doc.setFieldValue("processed", true); if (doc.hasField("fail") && doc.getFieldValue("fail", Boolean.class)) { diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java b/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java index 
e36d73a8d9f..b309d94fe08 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java @@ -64,9 +64,9 @@ public class TestProcessor implements Processor { return invokedCounter.get(); } - public static final class Factory extends AbstractProcessorFactory { + public static final class Factory implements Processor.Factory { @Override - public TestProcessor doCreate(String processorTag, Map config) throws Exception { + public TestProcessor create(String processorTag, Map config) throws Exception { return new TestProcessor(processorTag, "test-processor", ingestDocument -> {}); } } From 1ad3d2251fe16e1608a7ed26c4dc3fbe80d035ba Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 11:43:07 +0200 Subject: [PATCH 07/36] Fix line width --- .../org/elasticsearch/cloud/gce/GceComputeServiceImpl.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java index b27c9e409fe..63a19919a7b 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java @@ -97,7 +97,8 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponentemptyList() : instanceList.getItems(); + return instanceList.isEmpty() || instanceList.getItems() == null ? 
+ Collections.emptyList() : instanceList.getItems(); } catch (PrivilegedActionException e) { logger.warn("Problem fetching instance list for zone {}", e, zoneId); logger.debug("Full exception:", e); From 57a734e641d9bfdff7e8dbae535e577db20e7db3 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Thu, 30 Jun 2016 11:52:53 +0200 Subject: [PATCH 08/36] [doc] explain avg in function_score better (#19154) * [doc] explain avg in function_score better --- docs/reference/query-dsl/function-score-query.asciidoc | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index b6e4dedbc88..c6477b78d8b 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -83,9 +83,16 @@ First, each document is scored by the defined functions. The parameter `max`:: maximum score is used `min`:: minimum score is used -Because scores can be on different scales (for example, between 0 and 1 for decay functions but arbitrary for `field_value_factor`) and also because sometimes a different impact of functions on the score is desirable, the score of each function can be adjusted with a user defined `weight` (). The `weight` can be defined per function in the `functions` array (example above) and is multiplied with the score computed by the respective function. +Because scores can be on different scales (for example, between 0 and 1 for decay functions but arbitrary for `field_value_factor`) and also +because sometimes a different impact of functions on the score is desirable, the score of each function can be adjusted with a user defined +`weight`. The `weight` can be defined per function in the `functions` array (example above) and is multiplied with the score computed by +the respective function. 
If weight is given without any other function declaration, `weight` acts as a function that simply returns the `weight`. +In case `score_mode` is set to `avg` the individual scores will be combined by a **weighted** average. +For example, if two functions return score 1 and 2 and their respective weights are 3 and 4, then their scores will be combined as +`(1*3+2*4)/(3+4)` and **not** `(1*3+2*4)/2`. + The new score can be restricted to not exceed a certain limit by setting the `max_boost` parameter. The default for `max_boost` is FLT_MAX. From 0d7c11ea1d6ea3e33d6f493e406dbcd2144b69ad Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 30 Jun 2016 12:28:30 +0100 Subject: [PATCH 09/36] [DOCS] put profiling performance and limitations section on same page --- docs/reference/search/profile.asciidoc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 62e04e669a1..150b1b93a36 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -699,7 +699,9 @@ The meaning of the stats are as follows: This is not currently used and will always report `0`. Currently aggregation profiling only times the shard level parts of the aggregation execution. Timing of the reduce phase will be added later. -=== Performance Notes +=== Profiling Considerations + +==== Performance Notes Like any profiler, the Profile API introduces a non-negligible overhead to search execution. The act of instrumenting low-level method calls such as `collect`, `advance` and `next_doc` can be fairly expensive, since these methods are called @@ -710,7 +712,7 @@ There are also cases where special Lucene optimizations are disabled, since they could cause some queries to report larger relative times than their non-profiled counterparts, but in general should not have a drastic effect compared to other components in the profiled query. 
-=== Limitations +==== Limitations - Profiling statistics are currently not available for suggestions, highlighting, `dfs_query_then_fetch` - Profiling of the reduce phase of aggregation is currently not available From 40ec639c895aee0f027f1e641de06acb4f6055e6 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 30 Jun 2016 13:41:53 +0200 Subject: [PATCH 10/36] Factor out abstract TCPTransport* classes to reduce the netty footprint (#19096) Today we have a ton of logic inside the NettyTransport* codebase. The footprint of the code that has a direct netty dependency is large and alternative implementations are pretty hard today since they need to know all about our proticol etc. This change moves most of the code into TCPTransport* baseclasses and moves all the protocol send code together. The base classes now contain the majority of the logic while NettyTransport* classes remain to implement the glue code, configuration and optimization. --- .../elasticsearch/ElasticsearchException.java | 7 +- .../client/transport/TransportClient.java | 4 +- .../org/elasticsearch/common/io/Channels.java | 20 - .../common/netty/KeepFrameDecoder.java | 37 - .../netty/ReleaseChannelFutureListener.java | 42 - .../common/settings/ClusterSettings.java | 29 +- .../http/netty/ESHttpResponseEncoder.java | 2 +- .../http/netty/NettyHttpChannel.java | 5 +- .../http/netty/NettyHttpRequest.java | 2 +- .../http/netty/NettyHttpServerTransport.java | 4 +- .../elasticsearch/transport/TcpHeader.java | 49 + .../elasticsearch/transport/TcpTransport.java | 1347 +++++++++++++++++ .../transport/TcpTransportChannel.java | 103 ++ .../elasticsearch/transport/Transport.java | 7 + .../elasticsearch/transport/Transports.java | 9 +- .../netty/ChannelBufferBytesReference.java | 23 +- .../netty/ChannelBufferStreamInput.java | 4 +- .../ChannelBufferStreamInputFactory.java | 36 - .../netty/MessageChannelHandler.java | 437 ------ .../transport/netty/NettyHeader.java | 76 - .../netty/NettyInternalESLogger.java | 4 
+- .../netty/NettyInternalESLoggerFactory.java | 35 - .../netty/NettyMessageChannelHandler.java | 86 ++ .../transport/netty/NettyTransport.java | 1264 ++++------------ .../netty/NettyTransportChannel.java | 176 --- .../netty/NettyUtils.java | 18 +- .../netty/OpenChannelsHandler.java | 7 +- .../netty/SizeHeaderFrameDecoder.java | 98 +- .../ExceptionSerializationTests.java | 3 +- .../elasticsearch/common/ChannelsTests.java | 14 - .../util/concurrent}/KeyedLockTests.java | 2 +- .../AbstractSimpleTransportTestCase.java | 17 +- ...shPortTests.java => PublishPortTests.java} | 8 +- ...sportTests.java => TCPTransportTests.java} | 34 +- .../ChannelBufferBytesReferenceTests.java | 3 +- .../netty/NettyScheduledPingTests.java | 25 +- .../transport/netty/NettyTransportIT.java | 54 +- .../netty/NettyTransportMultiPortTests.java | 23 +- .../netty/NettyUtilsTests.java | 3 +- .../test/InternalTestCluster.java | 9 +- 40 files changed, 2024 insertions(+), 2102 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/common/netty/KeepFrameDecoder.java delete mode 100644 core/src/main/java/org/elasticsearch/common/netty/ReleaseChannelFutureListener.java create mode 100644 core/src/main/java/org/elasticsearch/transport/TcpHeader.java create mode 100644 core/src/main/java/org/elasticsearch/transport/TcpTransport.java create mode 100644 core/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java rename core/src/main/java/org/elasticsearch/{common => transport}/netty/ChannelBufferBytesReference.java (85%) delete mode 100644 core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInputFactory.java delete mode 100644 core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java delete mode 100644 core/src/main/java/org/elasticsearch/transport/netty/NettyHeader.java delete mode 100644 core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLoggerFactory.java create mode 100644 
core/src/main/java/org/elasticsearch/transport/netty/NettyMessageChannelHandler.java delete mode 100644 core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java rename core/src/main/java/org/elasticsearch/{common => transport}/netty/NettyUtils.java (90%) rename core/src/main/java/org/elasticsearch/{common => transport}/netty/OpenChannelsHandler.java (95%) rename core/src/test/java/org/elasticsearch/{transport/netty => common/util/concurrent}/KeyedLockTests.java (99%) rename core/src/test/java/org/elasticsearch/transport/{netty/NettyPublishPortTests.java => PublishPortTests.java} (94%) rename core/src/test/java/org/elasticsearch/transport/{netty/NettyTransportTests.java => TCPTransportTests.java} (78%) rename core/src/test/java/org/elasticsearch/{common => transport}/netty/ChannelBufferBytesReferenceTests.java (96%) rename core/src/test/java/org/elasticsearch/{common => transport}/netty/NettyUtilsTests.java (98%) diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java index ad5a2e79cd1..aa92fc176cf 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.TcpTransport; import java.io.IOException; import java.util.ArrayList; @@ -496,7 +497,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.index.shard.IndexShardStartedException::new, 23), SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class, org.elasticsearch.search.SearchContextMissingException::new, 24), - GENERAL_SCRIPT_EXCEPTION(org.elasticsearch.script.GeneralScriptException.class, + 
GENERAL_SCRIPT_EXCEPTION(org.elasticsearch.script.GeneralScriptException.class, org.elasticsearch.script.GeneralScriptException::new, 25), BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class, org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26), @@ -676,8 +677,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.indices.IndexAlreadyExistsException::new, 123), SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class, org.elasticsearch.script.Script.ScriptParseException::new, 124), - HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class, - org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125), + HTTP_ON_TRANSPORT_EXCEPTION(TcpTransport.HttpOnTransportException.class, + TcpTransport.HttpOnTransportException::new, 125), MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class, org.elasticsearch.index.mapper.MapperParsingException::new, 126), SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class, diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 5786f6b1cfb..c9313fc08c4 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -50,8 +50,8 @@ import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.search.SearchModule; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.netty.NettyTransport; import java.io.Closeable; 
import java.util.ArrayList; @@ -107,7 +107,7 @@ public class TransportClient extends AbstractClient { private PluginsService newPluginService(final Settings settings) { final Settings.Builder settingsBuilder = Settings.builder() - .put(NettyTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval + .put(TcpTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval .put(InternalSettingsPreparer.prepareSettings(settings)) .put(NetworkService.NETWORK_SERVER.getKey(), false) .put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE); diff --git a/core/src/main/java/org/elasticsearch/common/io/Channels.java b/core/src/main/java/org/elasticsearch/common/io/Channels.java index 2fa7ca1cdec..71a2d66dee3 100644 --- a/core/src/main/java/org/elasticsearch/common/io/Channels.java +++ b/core/src/main/java/org/elasticsearch/common/io/Channels.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.io; import org.elasticsearch.common.SuppressForbidden; -import org.jboss.netty.buffer.ChannelBuffer; import java.io.EOFException; import java.io.IOException; @@ -159,25 +158,6 @@ public final class Channels { return bytesRead; } - - /** - * Copies bytes from source {@link org.jboss.netty.buffer.ChannelBuffer} to a {@link java.nio.channels.GatheringByteChannel} - * - * @param source ChannelBuffer to copy from - * @param sourceIndex index in source to start copying from - * @param length how many bytes to copy - * @param channel target GatheringByteChannel - */ - public static void writeToChannel(ChannelBuffer source, int sourceIndex, int length, GatheringByteChannel channel) throws IOException { - while (length > 0) { - int written = source.getBytes(sourceIndex, channel, length); - sourceIndex += written; - length -= written; - } - assert length == 0; - } - - /** * Writes part of a byte array to a {@link java.nio.channels.WritableByteChannel} * diff --git 
a/core/src/main/java/org/elasticsearch/common/netty/KeepFrameDecoder.java b/core/src/main/java/org/elasticsearch/common/netty/KeepFrameDecoder.java deleted file mode 100644 index d2b02b7c51e..00000000000 --- a/core/src/main/java/org/elasticsearch/common/netty/KeepFrameDecoder.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.common.netty; - -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.ChannelHandlerContext; -import org.jboss.netty.handler.codec.frame.FrameDecoder; - -/** - * A marker to not remove frame decoder from the resulting jar so plugins can use it. 
- */ -public class KeepFrameDecoder extends FrameDecoder { - - public static final KeepFrameDecoder decoder = new KeepFrameDecoder(); - - @Override - protected Object decode(ChannelHandlerContext ctx, Channel channel, ChannelBuffer buffer) throws Exception { - return null; - } -} diff --git a/core/src/main/java/org/elasticsearch/common/netty/ReleaseChannelFutureListener.java b/core/src/main/java/org/elasticsearch/common/netty/ReleaseChannelFutureListener.java deleted file mode 100644 index 6f2979d13ca..00000000000 --- a/core/src/main/java/org/elasticsearch/common/netty/ReleaseChannelFutureListener.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.netty; - -import org.elasticsearch.common.lease.Releasable; -import org.jboss.netty.channel.ChannelFuture; -import org.jboss.netty.channel.ChannelFutureListener; - -/** - * A channel listener that releases a {@link org.elasticsearch.common.lease.Releasable} when - * the operation is complete. 
- */ -public class ReleaseChannelFutureListener implements ChannelFutureListener { - - private final Releasable releasable; - - public ReleaseChannelFutureListener(Releasable releasable) { - this.releasable = releasable; - } - - @Override - public void operationComplete(ChannelFuture future) throws Exception { - releasable.close(); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 5cb37d14858..9469ac8b509 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -89,6 +89,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.SearchService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; @@ -279,14 +280,14 @@ public final class ClusterSettings extends AbstractScopedSettings { TransportSettings.PUBLISH_PORT, TransportSettings.PORT, NettyTransport.WORKER_COUNT, - NettyTransport.CONNECTIONS_PER_NODE_RECOVERY, - NettyTransport.CONNECTIONS_PER_NODE_BULK, - NettyTransport.CONNECTIONS_PER_NODE_REG, - NettyTransport.CONNECTIONS_PER_NODE_STATE, - NettyTransport.CONNECTIONS_PER_NODE_PING, - NettyTransport.PING_SCHEDULE, - NettyTransport.TCP_BLOCKING_CLIENT, - NettyTransport.TCP_CONNECT_TIMEOUT, + TcpTransport.CONNECTIONS_PER_NODE_RECOVERY, + TcpTransport.CONNECTIONS_PER_NODE_BULK, + TcpTransport.CONNECTIONS_PER_NODE_REG, + TcpTransport.CONNECTIONS_PER_NODE_STATE, + TcpTransport.CONNECTIONS_PER_NODE_PING, + TcpTransport.PING_SCHEDULE, + TcpTransport.TCP_BLOCKING_CLIENT, + TcpTransport.TCP_CONNECT_TIMEOUT, NettyTransport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY, 
NettyTransport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE, @@ -294,12 +295,12 @@ public final class ClusterSettings extends AbstractScopedSettings { NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX, NetworkService.NETWORK_SERVER, NettyTransport.NETTY_BOSS_COUNT, - NettyTransport.TCP_NO_DELAY, - NettyTransport.TCP_KEEP_ALIVE, - NettyTransport.TCP_REUSE_ADDRESS, - NettyTransport.TCP_SEND_BUFFER_SIZE, - NettyTransport.TCP_RECEIVE_BUFFER_SIZE, - NettyTransport.TCP_BLOCKING_SERVER, + TcpTransport.TCP_NO_DELAY, + TcpTransport.TCP_KEEP_ALIVE, + TcpTransport.TCP_REUSE_ADDRESS, + TcpTransport.TCP_SEND_BUFFER_SIZE, + TcpTransport.TCP_RECEIVE_BUFFER_SIZE, + TcpTransport.TCP_BLOCKING_SERVER, NetworkService.GLOBAL_NETWORK_HOST_SETTING, NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING, NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING, diff --git a/core/src/main/java/org/elasticsearch/http/netty/ESHttpResponseEncoder.java b/core/src/main/java/org/elasticsearch/http/netty/ESHttpResponseEncoder.java index 36f2c3a138b..afa69a2fe02 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/ESHttpResponseEncoder.java +++ b/core/src/main/java/org/elasticsearch/http/netty/ESHttpResponseEncoder.java @@ -19,7 +19,7 @@ package org.elasticsearch.http.netty; -import org.elasticsearch.common.netty.NettyUtils; +import org.elasticsearch.transport.netty.NettyUtils; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.buffer.CompositeChannelBuffer; diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java index 3413a746963..c4253df2860 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java @@ -24,8 +24,7 @@ import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.netty.NettyUtils; -import org.elasticsearch.common.netty.ReleaseChannelFutureListener; +import org.elasticsearch.transport.netty.NettyUtils; import org.elasticsearch.http.netty.cors.CorsHandler; import org.elasticsearch.http.netty.pipelining.OrderedDownstreamChannelEvent; import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent; @@ -128,7 +127,7 @@ public final class NettyHttpChannel extends AbstractRestChannel { } if (content instanceof Releasable) { - future.addListener(new ReleaseChannelFutureListener((Releasable) content)); + future.addListener((x) -> ((Releasable)content).close()); addedReleaseListener = true; } diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java index d26841ead97..d62252bc0ce 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java @@ -21,7 +21,7 @@ package org.elasticsearch.http.netty; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.netty.NettyUtils; +import org.elasticsearch.transport.netty.NettyUtils; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.support.RestUtils; import org.jboss.netty.channel.Channel; diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java index 22852b7c0ff..77c140ce7ce 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java @@ -24,8 +24,8 @@ import 
com.carrotsearch.hppc.IntSet; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.netty.NettyUtils; -import org.elasticsearch.common.netty.OpenChannelsHandler; +import org.elasticsearch.transport.netty.NettyUtils; +import org.elasticsearch.transport.netty.OpenChannelsHandler; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; diff --git a/core/src/main/java/org/elasticsearch/transport/TcpHeader.java b/core/src/main/java/org/elasticsearch/transport/TcpHeader.java new file mode 100644 index 00000000000..bbc54c0eb62 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/transport/TcpHeader.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class TcpHeader { + public static final int MARKER_BYTES_SIZE = 2 * 1; + + public static final int MESSAGE_LENGTH_SIZE = 4; + + public static final int REQUEST_ID_SIZE = 8; + + public static final int STATUS_SIZE = 1; + + public static final int VERSION_ID_SIZE = 4; + + public static final int HEADER_SIZE = MARKER_BYTES_SIZE + MESSAGE_LENGTH_SIZE + REQUEST_ID_SIZE + STATUS_SIZE + VERSION_ID_SIZE; + + public static void writeHeader(StreamOutput output, long requestId, byte status, Version version, int messageSize) throws IOException { + output.writeByte((byte)'E'); + output.writeByte((byte)'S'); + // write the size, the size indicates the remaining message size, not including the size int + output.writeInt(messageSize - TcpHeader.MARKER_BYTES_SIZE - TcpHeader.MESSAGE_LENGTH_SIZE); + output.writeLong(requestId); + output.writeByte(status); + output.writeInt(version.id); + } +} diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java new file mode 100644 index 00000000000..f9b5eceb4b8 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -0,0 +1,1347 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import com.carrotsearch.hppc.IntHashSet; +import com.carrotsearch.hppc.IntSet; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.compress.Compressor; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.compress.NotCompressedException; +import org.elasticsearch.common.io.ReleasableBytesStream; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.network.NetworkAddress; +import 
org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.network.NetworkUtils; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.PortsRange; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.AbstractLifecycleRunnable; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.KeyedLock; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.support.TransportStatus; + +import java.io.Closeable; +import java.io.IOException; +import java.io.StreamCorruptedException; +import java.net.BindException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.nio.channels.CancelledKeyException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import 
java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.common.settings.Setting.boolSetting; +import static org.elasticsearch.common.settings.Setting.intSetting; +import static org.elasticsearch.common.settings.Setting.timeSetting; +import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException; +import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException; +import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; + +/** + */ +public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport { + + public static final String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker"; + public static final String HTTP_SERVER_BOSS_THREAD_NAME_PREFIX = "http_server_boss"; + public static final String TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX = "transport_client_worker"; + public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss"; + + // the scheduled internal ping interval setting, defaults to disabled (-1) + public static final Setting PING_SCHEDULE = + timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_RECOVERY = + intSetting("transport.connections_per_node.recovery", 2, 1, Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_BULK = + intSetting("transport.connections_per_node.bulk", 3, 1, Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_REG = + intSetting("transport.connections_per_node.reg", 6, 1, Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_STATE = + intSetting("transport.connections_per_node.state", 1, 1, 
Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_PING = + intSetting("transport.connections_per_node.ping", 1, 1, Setting.Property.NodeScope); + public static final Setting TCP_CONNECT_TIMEOUT = + timeSetting("transport.tcp.connect_timeout", NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT, Setting.Property.NodeScope); + public static final Setting TCP_NO_DELAY = + boolSetting("transport.tcp_no_delay", NetworkService.TcpSettings.TCP_NO_DELAY, Setting.Property.NodeScope); + public static final Setting TCP_KEEP_ALIVE = + boolSetting("transport.tcp.keep_alive", NetworkService.TcpSettings.TCP_KEEP_ALIVE, Setting.Property.NodeScope); + public static final Setting TCP_REUSE_ADDRESS = + boolSetting("transport.tcp.reuse_address", NetworkService.TcpSettings.TCP_REUSE_ADDRESS, Setting.Property.NodeScope); + public static final Setting TCP_BLOCKING_CLIENT = + boolSetting("transport.tcp.blocking_client", NetworkService.TcpSettings.TCP_BLOCKING_CLIENT, Setting.Property.NodeScope); + public static final Setting TCP_BLOCKING_SERVER = + boolSetting("transport.tcp.blocking_server", NetworkService.TcpSettings.TCP_BLOCKING_SERVER, Setting.Property.NodeScope); + public static final Setting TCP_SEND_BUFFER_SIZE = + Setting.byteSizeSetting("transport.tcp.send_buffer_size", NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, + Setting.Property.NodeScope); + public static final Setting TCP_RECEIVE_BUFFER_SIZE = + Setting.byteSizeSetting("transport.tcp.receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, + Setting.Property.NodeScope); + + private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().bytes() * 0.9); + private static final int PING_DATA_SIZE = -1; + + protected final int connectionsPerNodeRecovery; + protected final int connectionsPerNodeBulk; + protected final int connectionsPerNodeReg; + protected final int connectionsPerNodeState; + protected final int connectionsPerNodePing; + protected 
final TimeValue connectTimeout; + protected final boolean blockingClient; + private final CircuitBreakerService circuitBreakerService; + // package visibility for tests + protected final ScheduledPing scheduledPing; + private final TimeValue pingSchedule; + protected final ThreadPool threadPool; + private final BigArrays bigArrays; + protected final NetworkService networkService; + + protected volatile TransportServiceAdapter transportServiceAdapter; + // node id to actual channel + protected final ConcurrentMap connectedNodes = newConcurrentMap(); + protected final Map> serverChannels = newConcurrentMap(); + protected final ConcurrentMap profileBoundAddresses = newConcurrentMap(); + + protected final KeyedLock connectionLock = new KeyedLock<>(); + private final NamedWriteableRegistry namedWriteableRegistry; + + // this lock is here to make sure we close this transport and disconnect all the client nodes + // connections while no connect operations is going on... (this might help with 100% CPU when stopping the transport?) 
+ protected final ReadWriteLock globalLock = new ReentrantReadWriteLock(); + protected final boolean compress; + protected volatile BoundTransportAddress boundAddress; + private final String transportName; + + public TcpTransport(String transportName, Settings settings, ThreadPool threadPool, BigArrays bigArrays, + CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService) { + super(settings); + this.threadPool = threadPool; + this.bigArrays = bigArrays; + this.circuitBreakerService = circuitBreakerService; + this.scheduledPing = new ScheduledPing(); + this.pingSchedule = PING_SCHEDULE.get(settings); + this.namedWriteableRegistry = namedWriteableRegistry; + this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings); + this.networkService = networkService; + this.transportName = transportName; + + this.connectionsPerNodeRecovery = CONNECTIONS_PER_NODE_RECOVERY.get(settings); + this.connectionsPerNodeBulk = CONNECTIONS_PER_NODE_BULK.get(settings); + this.connectionsPerNodeReg = CONNECTIONS_PER_NODE_REG.get(settings); + this.connectionsPerNodeState = CONNECTIONS_PER_NODE_STATE.get(settings); + this.connectionsPerNodePing = CONNECTIONS_PER_NODE_PING.get(settings); + this.connectTimeout = TCP_CONNECT_TIMEOUT.get(settings); + this.blockingClient = TCP_BLOCKING_CLIENT.get(settings); + } + + @Override + protected void doStart() { + if (pingSchedule.millis() > 0) { + threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, scheduledPing); + } + } + + @Override + public CircuitBreaker getInFlightRequestBreaker() { + // We always obtain a fresh breaker to reflect changes to the breaker configuration. 
+ return circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); + } + + @Override + public void transportServiceAdapter(TransportServiceAdapter service) { + this.transportServiceAdapter = service; + } + + public Settings settings() { + return this.settings; + } + + public boolean isCompressed() { + return compress; + } + + public class ScheduledPing extends AbstractLifecycleRunnable { + + /** + * The magic number (must be lower than 0) for a ping message. This is handled + * specifically in {@link TcpTransport#validateMessageHeader}. + */ + private final BytesReference pingHeader; + final CounterMetric successfulPings = new CounterMetric(); + final CounterMetric failedPings = new CounterMetric(); + + public ScheduledPing() { + super(lifecycle, logger); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeByte((byte) 'E'); + out.writeByte((byte) 'S'); + out.writeInt(PING_DATA_SIZE); + pingHeader = out.bytes(); + } catch (IOException e) { + throw new IllegalStateException(e.getMessage(), e); // won't happen + } + } + + @Override + protected void doRunInLifecycle() throws Exception { + for (Map.Entry entry : connectedNodes.entrySet()) { + DiscoveryNode node = entry.getKey(); + NodeChannels channels = entry.getValue(); + for (Channel channel : channels.allChannels) { + try { + sendMessage(channel, pingHeader, successfulPings::inc, false); + } catch (Throwable t) { + if (isOpen(channel)) { + logger.debug("[{}] failed to send ping transport message", t, node); + failedPings.inc(); + } else { + logger.trace("[{}] failed to send ping transport message (channel closed)", t, node); + } + } + } + } + } + + public long getSuccessfulPings() { + return successfulPings.count(); + } + + public long getFailedPings() { + return failedPings.count(); + } + + @Override + protected void onAfterInLifecycle() { + threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, this); + } + + @Override + public void onFailure(Throwable t) { + if 
(lifecycle.stoppedOrClosed()) { + logger.trace("failed to send ping transport message", t); + } else { + logger.warn("failed to send ping transport message", t); + } + } + } + + public class NodeChannels implements Closeable { + + public List allChannels = Collections.emptyList(); + public Channel[] recovery; + public final AtomicInteger recoveryCounter = new AtomicInteger(); + public Channel[] bulk; + public final AtomicInteger bulkCounter = new AtomicInteger(); + public Channel[] reg; + public final AtomicInteger regCounter = new AtomicInteger(); + public Channel[] state; + public final AtomicInteger stateCounter = new AtomicInteger(); + public Channel[] ping; + public final AtomicInteger pingCounter = new AtomicInteger(); + + public NodeChannels(Channel[] recovery, Channel[] bulk, Channel[] reg, Channel[] state, Channel[] ping) { + this.recovery = recovery; + this.bulk = bulk; + this.reg = reg; + this.state = state; + this.ping = ping; + } + + public void start() { + List newAllChannels = new ArrayList<>(); + newAllChannels.addAll(Arrays.asList(recovery)); + newAllChannels.addAll(Arrays.asList(bulk)); + newAllChannels.addAll(Arrays.asList(reg)); + newAllChannels.addAll(Arrays.asList(state)); + newAllChannels.addAll(Arrays.asList(ping)); + this.allChannels = Collections.unmodifiableList(newAllChannels); + } + + public boolean hasChannel(Channel channel) { + for (Channel channel1 : allChannels) { + if (channel.equals(channel1)) { + return true; + } + } + return false; + } + + public Channel channel(TransportRequestOptions.Type type) { + if (type == TransportRequestOptions.Type.REG) { + return reg[Math.floorMod(regCounter.incrementAndGet(), reg.length)]; + } else if (type == TransportRequestOptions.Type.STATE) { + return state[Math.floorMod(stateCounter.incrementAndGet(), state.length)]; + } else if (type == TransportRequestOptions.Type.PING) { + return ping[Math.floorMod(pingCounter.incrementAndGet(), ping.length)]; + } else if (type == 
TransportRequestOptions.Type.BULK) { + return bulk[Math.floorMod(bulkCounter.incrementAndGet(), bulk.length)]; + } else if (type == TransportRequestOptions.Type.RECOVERY) { + return recovery[Math.floorMod(recoveryCounter.incrementAndGet(), recovery.length)]; + } else { + throw new IllegalArgumentException("no type channel for [" + type + "]"); + } + } + + public synchronized void close() { + closeChannels(allChannels); + } + } + + @Override + public boolean nodeConnected(DiscoveryNode node) { + return connectedNodes.containsKey(node); + } + + @Override + public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException { + connectToNode(node, true); + } + + @Override + public void connectToNode(DiscoveryNode node) { + connectToNode(node, false); + } + + public void connectToNode(DiscoveryNode node, boolean light) { + if (!lifecycle.started()) { + throw new IllegalStateException("can't add nodes to a stopped transport"); + } + if (node == null) { + throw new ConnectTransportException(null, "can't connect to a null node"); + } + globalLock.readLock().lock(); + try { + + try (Releasable ignored = connectionLock.acquire(node.getId())) { + if (!lifecycle.started()) { + throw new IllegalStateException("can't add nodes to a stopped transport"); + } + NodeChannels nodeChannels = connectedNodes.get(node); + if (nodeChannels != null) { + return; + } + try { + if (light) { + nodeChannels = connectToChannelsLight(node); + } else { + try { + nodeChannels = connectToChannels(node); + } catch (Throwable e) { + logger.trace("failed to connect to [{}], cleaning dangling connections", e, node); + throw e; + } + } + // we acquire a connection lock, so no way there is an existing connection + nodeChannels.start(); + connectedNodes.put(node, nodeChannels); + if (logger.isDebugEnabled()) { + logger.debug("connected to node [{}]", node); + } + transportServiceAdapter.raiseNodeConnected(node); + } catch (ConnectTransportException e) { + throw e; + } catch (Exception e) { 
+ throw new ConnectTransportException(node, "general node connection failure", e); + } + } + } finally { + globalLock.readLock().unlock(); + } + } + /** + * Disconnects from a node, only if the relevant channel is found to be part of the node channels. + */ + protected boolean disconnectFromNode(DiscoveryNode node, Channel channel, String reason) { + // this might be called multiple times from all the node channels, so do a lightweight + // check outside of the lock + NodeChannels nodeChannels = connectedNodes.get(node); + if (nodeChannels != null && nodeChannels.hasChannel(channel)) { + try (Releasable ignored = connectionLock.acquire(node.getId())) { + nodeChannels = connectedNodes.get(node); + // check again within the connection lock, if its still applicable to remove it + if (nodeChannels != null && nodeChannels.hasChannel(channel)) { + connectedNodes.remove(node); + try { + logger.debug("disconnecting from [{}], {}", node, reason); + nodeChannels.close(); + } finally { + logger.trace("disconnected from [{}], {}", node, reason); + transportServiceAdapter.raiseNodeDisconnected(node); + } + return true; + } + } + } + return false; + } + + /** + * Disconnects from a node if a channel is found as part of that nodes channels. 
+ */ + protected final void disconnectFromNodeChannel(final Channel channel, final Throwable failure) { + threadPool.generic().execute(() -> { + try { + closeChannels(Collections.singletonList(channel)); + } finally { + for (DiscoveryNode node : connectedNodes.keySet()) { + if (disconnectFromNode(node, channel, ExceptionsHelper.detailedMessage(failure))) { + // if we managed to find this channel and disconnect from it, then break, no need to check on + // the rest of the nodes + break; + } + } + } + }); + } + + protected Channel nodeChannel(DiscoveryNode node, TransportRequestOptions options) throws ConnectTransportException { + NodeChannels nodeChannels = connectedNodes.get(node); + if (nodeChannels == null) { + throw new NodeNotConnectedException(node, "Node not connected"); + } + return nodeChannels.channel(options.type()); + } + + @Override + public void disconnectFromNode(DiscoveryNode node) { + try (Releasable ignored = connectionLock.acquire(node.getId())) { + NodeChannels nodeChannels = connectedNodes.remove(node); + if (nodeChannels != null) { + try { + logger.debug("disconnecting from [{}] due to explicit disconnect call", node); + nodeChannels.close(); + } finally { + logger.trace("disconnected from [{}] due to explicit disconnect call", node); + transportServiceAdapter.raiseNodeDisconnected(node); + } + } + } + } + + protected Version getCurrentVersion() { + // this is just for tests to mock stuff like the nodes version - tests can override this internally + return Version.CURRENT; + } + + @Override + public boolean addressSupported(Class address) { + return InetSocketTransportAddress.class.equals(address); + } + + @Override + public BoundTransportAddress boundAddress() { + return this.boundAddress; + } + + @Override + public Map profileBoundAddresses() { + return unmodifiableMap(new HashMap<>(profileBoundAddresses)); + } + + protected Map buildProfileSettings() { + // extract default profile first and create standard bootstrap + Map profiles = 
TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings()).getAsGroups(true); + if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) { + profiles = new HashMap<>(profiles); + profiles.put(TransportSettings.DEFAULT_PROFILE, Settings.EMPTY); + } + Settings defaultSettings = profiles.get(TransportSettings.DEFAULT_PROFILE); + Map result = new HashMap<>(); + // loop through all profiles and start them up, special handling for default one + for (Map.Entry entry : profiles.entrySet()) { + Settings profileSettings = entry.getValue(); + String name = entry.getKey(); + + if (!Strings.hasLength(name)) { + logger.info("transport profile configured without a name. skipping profile with settings [{}]", + profileSettings.toDelimitedString(',')); + continue; + } else if (TransportSettings.DEFAULT_PROFILE.equals(name)) { + profileSettings = Settings.builder() + .put(profileSettings) + .put("port", profileSettings.get("port", TransportSettings.PORT.get(this.settings))) + .build(); + } else if (profileSettings.get("port") == null) { + // if profile does not have a port, skip it + logger.info("No port configured for profile [{}], not binding", name); + continue; + } + Settings mergedSettings = Settings.builder() + .put(defaultSettings) + .put(profileSettings) + .build(); + result.put(name, mergedSettings); + } + return result; + } + + @Override + public List getLocalAddresses() { + List local = new ArrayList<>(); + local.add("127.0.0.1"); + // check if v6 is supported, if so, v4 will also work via mapped addresses. + if (NetworkUtils.SUPPORTS_V6) { + local.add("[::1]"); // may get ports appended! + } + return local; + } + + protected void bindServer(final String name, final Settings settings) { + // Bind and start to accept incoming connections. 
+ InetAddress hostAddresses[]; + String bindHosts[] = settings.getAsArray("bind_host", null); + try { + hostAddresses = networkService.resolveBindHostAddresses(bindHosts); + } catch (IOException e) { + throw new BindTransportException("Failed to resolve host " + Arrays.toString(bindHosts) + "", e); + } + if (logger.isDebugEnabled()) { + String[] addresses = new String[hostAddresses.length]; + for (int i = 0; i < hostAddresses.length; i++) { + addresses[i] = NetworkAddress.format(hostAddresses[i]); + } + logger.debug("binding server bootstrap to: {}", (Object)addresses); + } + + assert hostAddresses.length > 0; + + List boundAddresses = new ArrayList<>(); + for (InetAddress hostAddress : hostAddresses) { + boundAddresses.add(bindToPort(name, hostAddress, settings.get("port"))); + } + + final BoundTransportAddress boundTransportAddress = createBoundTransportAddress(name, settings, boundAddresses); + + if (TransportSettings.DEFAULT_PROFILE.equals(name)) { + this.boundAddress = boundTransportAddress; + } else { + profileBoundAddresses.put(name, boundTransportAddress); + } + } + + protected InetSocketAddress bindToPort(final String name, final InetAddress hostAddress, String port) { + PortsRange portsRange = new PortsRange(port); + final AtomicReference lastException = new AtomicReference<>(); + final AtomicReference boundSocket = new AtomicReference<>(); + boolean success = portsRange.iterate(portNumber -> { + try { + Channel channel = bind(name, new InetSocketAddress(hostAddress, portNumber)); + synchronized (serverChannels) { + List list = serverChannels.get(name); + if (list == null) { + list = new ArrayList<>(); + serverChannels.put(name, list); + } + list.add(channel); + boundSocket.set(getLocalAddress(channel)); + } + } catch (Exception e) { + lastException.set(e); + return false; + } + return true; + }); + if (!success) { + throw new BindTransportException("Failed to bind to [" + port + "]", lastException.get()); + } + + if (logger.isDebugEnabled()) { + 
logger.debug("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get())); + } + + return boundSocket.get(); + } + + private BoundTransportAddress createBoundTransportAddress(String name, Settings profileSettings, + List boundAddresses) { + String[] boundAddressesHostStrings = new String[boundAddresses.size()]; + TransportAddress[] transportBoundAddresses = new TransportAddress[boundAddresses.size()]; + for (int i = 0; i < boundAddresses.size(); i++) { + InetSocketAddress boundAddress = boundAddresses.get(i); + boundAddressesHostStrings[i] = boundAddress.getHostString(); + transportBoundAddresses[i] = new InetSocketTransportAddress(boundAddress); + } + + final String[] publishHosts; + if (TransportSettings.DEFAULT_PROFILE.equals(name)) { + publishHosts = TransportSettings.PUBLISH_HOST.get(settings).toArray(Strings.EMPTY_ARRAY); + } else { + publishHosts = profileSettings.getAsArray("publish_host", boundAddressesHostStrings); + } + + final InetAddress publishInetAddress; + try { + publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts); + } catch (Exception e) { + throw new BindTransportException("Failed to resolve publish address", e); + } + + final int publishPort = resolvePublishPort(name, settings, profileSettings, boundAddresses, publishInetAddress); + final TransportAddress publishAddress = new InetSocketTransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); + return new BoundTransportAddress(transportBoundAddresses, publishAddress); + } + + // package private for tests + public static int resolvePublishPort(String profileName, Settings settings, Settings profileSettings, + List boundAddresses, InetAddress publishInetAddress) { + int publishPort; + if (TransportSettings.DEFAULT_PROFILE.equals(profileName)) { + publishPort = TransportSettings.PUBLISH_PORT.get(settings); + } else { + publishPort = profileSettings.getAsInt("publish_port", -1); + } + + // if port not explicitly provided, search for 
port of address in boundAddresses that matches publishInetAddress + if (publishPort < 0) { + for (InetSocketAddress boundAddress : boundAddresses) { + InetAddress boundInetAddress = boundAddress.getAddress(); + if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { + publishPort = boundAddress.getPort(); + break; + } + } + } + + // if no matching boundAddress found, check if there is a unique port for all bound addresses + if (publishPort < 0) { + final IntSet ports = new IntHashSet(); + for (InetSocketAddress boundAddress : boundAddresses) { + ports.add(boundAddress.getPort()); + } + if (ports.size() == 1) { + publishPort = ports.iterator().next().value; + } + } + + if (publishPort < 0) { + String profileExplanation = TransportSettings.DEFAULT_PROFILE.equals(profileName) ? "" : " for profile " + profileName; + throw new BindTransportException("Failed to auto-resolve publish port" + profileExplanation + ", multiple bound addresses " + + boundAddresses + " with distinct ports and none of them matched the publish address (" + publishInetAddress + "). " + + "Please specify a unique port by setting " + TransportSettings.PORT.getKey() + " or " + + TransportSettings.PUBLISH_PORT.getKey()); + } + return publishPort; + } + + @Override + public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception { + return parse(address, settings.get("transport.profiles.default.port", TransportSettings.PORT.get(settings)), perAddressLimit); + } + + // this code is a take on guava's HostAndPort, like a HostAndPortRange + + // pattern for validating ipv6 bracket addresses. 
+ // not perfect, but PortsRange should take care of any port range validation, not a regex + private static final Pattern BRACKET_PATTERN = Pattern.compile("^\\[(.*:.*)\\](?::([\\d\\-]*))?$"); + + /** parse a hostname+port range spec into its equivalent addresses */ + static TransportAddress[] parse(String hostPortString, String defaultPortRange, int perAddressLimit) throws UnknownHostException { + Objects.requireNonNull(hostPortString); + String host; + String portString = null; + + if (hostPortString.startsWith("[")) { + // Parse a bracketed host, typically an IPv6 literal. + Matcher matcher = BRACKET_PATTERN.matcher(hostPortString); + if (!matcher.matches()) { + throw new IllegalArgumentException("Invalid bracketed host/port range: " + hostPortString); + } + host = matcher.group(1); + portString = matcher.group(2); // could be null + } else { + int colonPos = hostPortString.indexOf(':'); + if (colonPos >= 0 && hostPortString.indexOf(':', colonPos + 1) == -1) { + // Exactly 1 colon. Split into host:port. + host = hostPortString.substring(0, colonPos); + portString = hostPortString.substring(colonPos + 1); + } else { + // 0 or 2+ colons. Bare hostname or IPv6 literal. 
+ host = hostPortString; + // 2+ colons and not bracketed: exception + if (colonPos >= 0) { + throw new IllegalArgumentException("IPv6 addresses must be bracketed: " + hostPortString); + } + } + } + + // if port isn't specified, fill with the default + if (portString == null || portString.isEmpty()) { + portString = defaultPortRange; + } + + // generate address for each port in the range + Set addresses = new HashSet<>(Arrays.asList(InetAddress.getAllByName(host))); + List transportAddresses = new ArrayList<>(); + int[] ports = new PortsRange(portString).ports(); + int limit = Math.min(ports.length, perAddressLimit); + for (int i = 0; i < limit; i++) { + for (InetAddress address : addresses) { + transportAddresses.add(new InetSocketTransportAddress(address, ports[i])); + } + } + return transportAddresses.toArray(new TransportAddress[transportAddresses.size()]); + } + + @Override + protected final void doClose() { + } + + @Override + protected final void doStop() { + final CountDownLatch latch = new CountDownLatch(1); + // make sure we run it on another thread than a possible IO handler thread + threadPool.generic().execute(() -> { + globalLock.writeLock().lock(); + try { + for (Iterator it = connectedNodes.values().iterator(); it.hasNext(); ) { + NodeChannels nodeChannels = it.next(); + it.remove(); + nodeChannels.close(); + } + + for (Map.Entry> entry : serverChannels.entrySet()) { + try { + closeChannels(entry.getValue()); + } catch (Throwable t) { + logger.debug("Error closing serverChannel for profile [{}]", t, entry.getKey()); + } + } + try { + stopInternal(); + } finally { + for (Iterator it = connectedNodes.values().iterator(); it.hasNext(); ) { + NodeChannels nodeChannels = it.next(); + it.remove(); + nodeChannels.close(); + } + } + + } finally { + globalLock.writeLock().unlock(); + latch.countDown(); + } + }); + + try { + latch.await(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.interrupted(); + // ignore + } + } + + protected void 
onException(Channel channel, Throwable e) { + if (!lifecycle.started()) { + // ignore + return; + } + if (isCloseConnectionException(e)) { + logger.trace("close connection exception caught on transport layer [{}], disconnecting from relevant node", e, + channel); + // close the channel, which will cause a node to be disconnected if relevant + disconnectFromNodeChannel(channel, e); + } else if (isConnectException(e)) { + logger.trace("connect exception caught on transport layer [{}]", e, channel); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + disconnectFromNodeChannel(channel, e); + } else if (e instanceof BindException) { + logger.trace("bind exception caught on transport layer [{}]", e, channel); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + disconnectFromNodeChannel(channel, e); + } else if (e instanceof CancelledKeyException) { + logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e, + channel); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + disconnectFromNodeChannel(channel, e); + } else if (e instanceof TcpTransport.HttpOnTransportException) { + // in case we are able to return data, serialize the exception content and sent it back to the client + if (isOpen(channel)) { + sendMessage(channel, new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)), () -> {}, true); + } + } else { + logger.warn("exception caught on transport layer [{}], closing connection", e, channel); + // close the channel, which will cause a node to be disconnected if relevant + disconnectFromNodeChannel(channel, e); + } + } + + /** + * Returns the channels local address + */ + protected abstract InetSocketAddress getLocalAddress(Channel channel); + + /** + * Binds to the given {@link InetSocketAddress} + * @param name the profile name + * @param address the address to bind to + */ 
+ protected abstract Channel bind(String name, InetSocketAddress address); + + /** + * Closes all channels in this list + */ + protected abstract void closeChannels(List channel); + + /** + * Connects to the given node in a light way. This means we are not creating multiple connections like we do + * for production connections. This connection is for pings or handshakes + */ + protected abstract NodeChannels connectToChannelsLight(DiscoveryNode node); + + + protected abstract void sendMessage(Channel channel, BytesReference reference, Runnable sendListener, boolean close); + + /** + * Connects to the node in a heavy way. + * + * @see #connectToChannelsLight(DiscoveryNode) + */ + protected abstract NodeChannels connectToChannels(DiscoveryNode node); + + /** + * Called to tear down internal resources + */ + protected void stopInternal() {} + + public boolean canCompress(TransportRequest request) { + return compress; + } + + @Override + public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, + TransportRequestOptions options) throws IOException, TransportException { + + Channel targetChannel = nodeChannel(node, options); + + if (compress) { + options = TransportRequestOptions.builder(options).withCompress(true).build(); + } + byte status = 0; + status = TransportStatus.setRequest(status); + ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); + boolean addedReleaseListener = false; + try { + bStream.skip(TcpHeader.HEADER_SIZE); + StreamOutput stream = bStream; + // only compress if asked, and, the request is not bytes, since then only + // the header part is compressed, and the "body" can't be extracted as compressed + if (options.compress() && canCompress(request)) { + status = TransportStatus.setCompress(status); + stream = CompressorFactory.COMPRESSOR.streamOutput(stream); + } + + // we pick the smallest of the 2, to support both backward and forward compatibility + // 
note, this is the only place we need to do this, since from here on, we use the serialized version + // as the version to use also when the node receiving this request will send the response with + Version version = Version.smallest(getCurrentVersion(), node.getVersion()); + + stream.setVersion(version); + threadPool.getThreadContext().writeTo(stream); + stream.writeString(action); + + Message writeable = prepareSend(node.getVersion(), request, stream, bStream); + try (StreamOutput headerOutput = writeable.getHeaderOutput()) { + TcpHeader.writeHeader(headerOutput, requestId, status, version, + writeable.size()); + } + final TransportRequestOptions finalOptions = options; + Runnable onRequestSent = () -> { + try { + Releasables.close(bStream.bytes()); + } finally { + transportServiceAdapter.onRequestSent(node, requestId, action, request, finalOptions); + } + }; + writeable.send(targetChannel, onRequestSent); + addedReleaseListener = true; + + } finally { + if (!addedReleaseListener) { + Releasables.close(bStream.bytes()); + } + } + } + + /** + * Sends back an error response to the caller via the given channel + * @param nodeVersion the caller node version + * @param channel the channel to send the response to + * @param error the error to return + * @param requestId the request ID this response replies to + * @param action the action this response replies to + */ + public void sendErrorResponse(Version nodeVersion, Channel channel, final Throwable error, final long requestId, + final String action) throws IOException { + BytesStreamOutput stream = new BytesStreamOutput(); + stream.setVersion(nodeVersion); + stream.skip(TcpHeader.HEADER_SIZE); + RemoteTransportException tx = new RemoteTransportException( + nodeName(), new InetSocketTransportAddress(getLocalAddress(channel)), action, error); + stream.writeThrowable(tx); + byte status = 0; + status = TransportStatus.setResponse(status); + status = TransportStatus.setError(status); + + final BytesReference bytes = 
stream.bytes(); + Message writeable = prepareSend(nodeVersion, bytes); + try (StreamOutput headerOutput = writeable.getHeaderOutput()) { + TcpHeader.writeHeader(headerOutput, requestId, status, nodeVersion, + writeable.size()); + } + Runnable onRequestSent = () -> { + transportServiceAdapter.onResponseSent(requestId, action, error); + }; + writeable.send(channel, onRequestSent); + } + + /** + * Sends the response to the given channel. This method should be used to send {@link TransportResponse} objects back to the caller. + * + * @see #sendErrorResponse(Version, Object, Throwable, long, String) for sending back errors to the caller + */ + public void sendResponse(Version nodeVersion, Channel channel, final TransportResponse response, final long requestId, + final String action, TransportResponseOptions options) throws IOException { + if (compress) { + options = TransportResponseOptions.builder(options).withCompress(true).build(); + } + + byte status = 0; + status = TransportStatus.setResponse(status); // TODO share some code with sendRequest + ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); + boolean addedReleaseListener = false; + try { + bStream.skip(TcpHeader.HEADER_SIZE); + StreamOutput stream = bStream; + if (options.compress()) { + status = TransportStatus.setCompress(status); + stream = CompressorFactory.COMPRESSOR.streamOutput(stream); + } + stream.setVersion(nodeVersion); + Message writeable = prepareSend(nodeVersion, response, stream, bStream); + try (StreamOutput headerOutput = writeable.getHeaderOutput()) { + TcpHeader.writeHeader(headerOutput, requestId, status, nodeVersion, + writeable.size()); + } + final TransportResponseOptions finalOptions = options; + Runnable onRequestSent = () -> { + try { + Releasables.close(bStream.bytes()); + } finally { + transportServiceAdapter.onResponseSent(requestId, action, response, finalOptions); + } + }; + writeable.send(channel, onRequestSent); + addedReleaseListener = true; + + } 
finally { + if (!addedReleaseListener) { + Releasables.close(bStream.bytes()); + } + } + } + + /** + * Serializes the given message into a bytes representation and forwards to {@link #prepareSend(Version, TransportMessage, + * StreamOutput, ReleasableBytesStream)} + */ + protected Message prepareSend(Version nodeVersion, TransportMessage message, StreamOutput stream, + ReleasableBytesStream writtenBytes) throws IOException { + message.writeTo(stream); + stream.close(); + return prepareSend(nodeVersion, writtenBytes.bytes()); + } + + /** + * prepares a implementation specific message to send across the network + */ + protected abstract Message prepareSend(Version nodeVersion, BytesReference bytesReference) throws IOException; + + /** + * Allows implementations to transform TransportMessages into implementation specific messages + */ + protected interface Message { + /** + * Creates an output to write the message header to. + */ + StreamOutput getHeaderOutput(); + + /** + * Returns the size of the message in bytes + */ + int size(); + + /** + * sends the message to the channel + * @param channel the channe to send the message to + * @param onRequestSent a callback executed once the message has been fully send + */ + void send(Channel channel, Runnable onRequestSent); + } + + + /** + * Validates the first N bytes of the message header and returns true if the message is + * a ping message and has no payload ie. isn't a real user level message. + * + * @throws IllegalStateException if the message is too short, less than the header or less that the header plus the message size + * @throws HttpOnTransportException if the message has no valid header and appears to be a HTTP message + * @throws IllegalArgumentException if the message is greater that the maximum allowed frame size. This is dependent on the available + * memory. 
+ */ + public static boolean validateMessageHeader(BytesReference buffer) throws IOException { + final int sizeHeaderLength = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; + if (buffer.length() < sizeHeaderLength) { + throw new IllegalStateException("message size must be >= to the header size"); + } + int offset = 0; + if (buffer.get(offset) != 'E' || buffer.get(offset + 1) != 'S') { + // special handling for what is probably HTTP + if (bufferStartsWith(buffer, offset, "GET ") || + bufferStartsWith(buffer, offset, "POST ") || + bufferStartsWith(buffer, offset, "PUT ") || + bufferStartsWith(buffer, offset, "HEAD ") || + bufferStartsWith(buffer, offset, "DELETE ") || + bufferStartsWith(buffer, offset, "OPTIONS ") || + bufferStartsWith(buffer, offset, "PATCH ") || + bufferStartsWith(buffer, offset, "TRACE ")) { + + throw new HttpOnTransportException("This is not a HTTP port"); + } + + // we have 6 readable bytes, show 4 (should be enough) + throw new StreamCorruptedException("invalid internal transport message format, got (" + + Integer.toHexString(buffer.get(offset) & 0xFF) + "," + + Integer.toHexString(buffer.get(offset + 1) & 0xFF) + "," + + Integer.toHexString(buffer.get(offset + 2) & 0xFF) + "," + + Integer.toHexString(buffer.get(offset + 3) & 0xFF) + ")"); + } + + final int dataLen; + try (StreamInput input = buffer.streamInput()) { + input.skip(TcpHeader.MARKER_BYTES_SIZE); + dataLen = input.readInt(); + if (dataLen == PING_DATA_SIZE) { + // discard the messages we read and continue, this is achieved by skipping the bytes + // and returning null + return false; + } + } + if (dataLen <= 0) { + throw new StreamCorruptedException("invalid data length: " + dataLen); + } + // safety against too large frames being sent + if (dataLen > NINETY_PER_HEAP_SIZE) { + throw new IllegalArgumentException("transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded [" + + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); + } + + if 
(buffer.length() < dataLen + sizeHeaderLength) { + throw new IllegalStateException("buffer must be >= to the message size but wasn't"); + } + return true; + } + + private static boolean bufferStartsWith(BytesReference buffer, int offset, String method) { + char[] chars = method.toCharArray(); + for (int i = 0; i < chars.length; i++) { + if (buffer.get(offset+ i) != chars[i]) { + return false; + } + } + + return true; + } + + /** + * A helper exception to mark an incoming connection as potentially being HTTP + * so an appropriate error code can be returned + */ + public static class HttpOnTransportException extends ElasticsearchException { + + public HttpOnTransportException(String msg) { + super(msg); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + + public HttpOnTransportException(StreamInput in) throws IOException{ + super(in); + } + } + + protected abstract boolean isOpen(Channel channel); + + /** + * This method handles the message receive part for both request and responses + */ + public final void messageReceived(BytesReference reference, Channel channel, String profileName, + InetSocketAddress remoteAddress, int messageLengthBytes) throws IOException { + final int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; + transportServiceAdapter.received(totalMessageSize); + // we have additional bytes to read, outside of the header + boolean hasMessageBytesToRead = (totalMessageSize - TcpHeader.HEADER_SIZE) > 0; + StreamInput streamIn = reference.streamInput(); + boolean success = false; + try (ThreadContext.StoredContext tCtx = threadPool.getThreadContext().stashContext()) { + long requestId = streamIn.readLong(); + byte status = streamIn.readByte(); + Version version = Version.fromId(streamIn.readInt()); + if (TransportStatus.isCompress(status) && hasMessageBytesToRead && streamIn.available() > 0) { + Compressor compressor; + try { + final int bytesConsumed = 
TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE; + compressor = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)); + } catch (NotCompressedException ex) { + int maxToRead = Math.min(reference.length(), 10); + StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead) + .append("] content bytes out of [").append(reference.length()) + .append("] readable bytes with message size [").append(messageLengthBytes).append("] ").append("] are ["); + for (int i = 0; i < maxToRead; i++) { + sb.append(reference.get(i)).append(","); + } + sb.append("]"); + throw new IllegalStateException(sb.toString()); + } + streamIn = compressor.streamInput(streamIn); + } + if (version.onOrAfter(Version.CURRENT.minimumCompatibilityVersion()) == false || version.major != Version.CURRENT.major) { + throw new IllegalStateException("Received message from unsupported version: [" + version + + "] minimal compatible version is: [" +Version.CURRENT.minimumCompatibilityVersion() + "]"); + } + streamIn = new NamedWriteableAwareStreamInput(streamIn, namedWriteableRegistry); + streamIn.setVersion(version); + if (TransportStatus.isRequest(status)) { + threadPool.getThreadContext().readHeaders(streamIn); + handleRequest(channel, profileName, streamIn, requestId, messageLengthBytes, version, remoteAddress); + } else { + final TransportResponseHandler handler = transportServiceAdapter.onResponseReceived(requestId); + // ignore if its null, the adapter logs it + if (handler != null) { + if (TransportStatus.isError(status)) { + handlerResponseError(streamIn, handler); + } else { + handleResponse(remoteAddress, streamIn, handler); + } + // Check the entire message has been read + final int nextByte = streamIn.read(); + // calling read() is useful to make sure the message is fully read, even if there is an EOS marker + if (nextByte != -1) { + throw new 
IllegalStateException("Message not fully read (response) for requestId [" + requestId + "], handler [" + + handler + "], error [" + TransportStatus.isError(status) + "]; resetting"); + } + } + } + success = true; + } finally { + if (success) { + IOUtils.close(streamIn); + } else { + IOUtils.closeWhileHandlingException(streamIn); + } + } + } + + private void handleResponse(InetSocketAddress remoteAddress, final StreamInput stream, final TransportResponseHandler handler) { + final TransportResponse response = handler.newInstance(); + response.remoteAddress(new InetSocketTransportAddress(remoteAddress)); + try { + response.readFrom(stream); + } catch (Throwable e) { + handleException(handler, new TransportSerializationException( + "Failed to deserialize response of type [" + response.getClass().getName() + "]", e)); + return; + } + threadPool.executor(handler.executor()).execute(new AbstractRunnable() { + @Override + public void onFailure(Throwable t) { + handleException(handler, new ResponseHandlerFailureTransportException(t)); + } + + @Override + protected void doRun() throws Exception { + handler.handleResponse(response); + }}); + + } + + /** + * Executed for a received response error + */ + private void handlerResponseError(StreamInput stream, final TransportResponseHandler handler) { + Throwable error; + try { + error = stream.readThrowable(); + } catch (Throwable e) { + error = new TransportSerializationException("Failed to deserialize exception response from stream", e); + } + handleException(handler, error); + } + + private void handleException(final TransportResponseHandler handler, Throwable error) { + if (!(error instanceof RemoteTransportException)) { + error = new RemoteTransportException(error.getMessage(), error); + } + final RemoteTransportException rtx = (RemoteTransportException) error; + threadPool.executor(handler.executor()).execute(() -> { + try { + handler.handleException(rtx); + } catch (Throwable e) { + logger.error("failed to handle exception 
response [{}]", e, handler); + } + }); + } + + protected String handleRequest(Channel channel, String profileName, final StreamInput stream, long requestId, + int messageLengthBytes, Version version, InetSocketAddress remoteAddress) throws IOException { + final String action = stream.readString(); + transportServiceAdapter.onRequestReceived(requestId, action); + TransportChannel transportChannel = null; + try { + final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); + if (reg == null) { + throw new ActionNotFoundTransportException(action); + } + if (reg.canTripCircuitBreaker()) { + getInFlightRequestBreaker().addEstimateBytesAndMaybeBreak(messageLengthBytes, ""); + } else { + getInFlightRequestBreaker().addWithoutBreaking(messageLengthBytes); + } + transportChannel = new TcpTransportChannel<>(this, channel, transportName, action, requestId, version, profileName, + messageLengthBytes); + final TransportRequest request = reg.newRequest(); + request.remoteAddress(new InetSocketTransportAddress(remoteAddress)); + request.readFrom(stream); + // in case we throw an exception, i.e. when the limit is hit, we don't want to verify + validateRequest(stream, requestId, action); + threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); + } catch (Throwable e) { + // the circuit breaker tripped + if (transportChannel == null) { + transportChannel = new TcpTransportChannel<>(this, channel, transportName, action, requestId, version, profileName, 0); + } + try { + transportChannel.sendResponse(e); + } catch (IOException e1) { + logger.warn("Failed to send error message back to client for action [{}]", e, action); + logger.warn("Actual Exception", e1); + } + } + return action; + } + + // This template method is needed to inject custom error checking logic in tests. 
+ protected void validateRequest(StreamInput stream, long requestId, String action) throws IOException { + final int nextByte = stream.read(); + // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker + if (nextByte != -1) { + throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action [" + action + + "], available [" + stream.available() + "]; resetting"); + } + } + + class RequestHandler extends AbstractRunnable { + private final RequestHandlerRegistry reg; + private final TransportRequest request; + private final TransportChannel transportChannel; + + public RequestHandler(RequestHandlerRegistry reg, TransportRequest request, TransportChannel transportChannel) { + this.reg = reg; + this.request = request; + this.transportChannel = transportChannel; + } + + @SuppressWarnings({"unchecked"}) + @Override + protected void doRun() throws Exception { + reg.processMessageReceived(request, transportChannel); + } + + @Override + public boolean isForceExecution() { + return reg.isForceExecution(); + } + + @Override + public void onFailure(Throwable e) { + if (lifecycleState() == Lifecycle.State.STARTED) { + // we can only send a response transport is started.... + try { + transportChannel.sendResponse(e); + } catch (Throwable e1) { + logger.warn("Failed to send error message back to client for action [{}]", e1, reg.getAction()); + logger.warn("Actual Exception", e); + } + } + } + } +} diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java b/core/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java new file mode 100644 index 00000000000..74bdad48746 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.elasticsearch.Version; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * + */ +public final class TcpTransportChannel implements TransportChannel { + private final TcpTransport transport; + protected final Version version; + protected final String action; + protected final long requestId; + private final String profileName; + private final long reservedBytes; + private final AtomicBoolean released = new AtomicBoolean(); + private final String channelType; + private final Channel channel; + + public TcpTransportChannel(TcpTransport transport, Channel channel, String channelType, String action, + long requestId, Version version, String profileName, long reservedBytes) { + this.version = version; + this.channel = channel; + this.transport = transport; + this.action = action; + this.requestId = requestId; + this.profileName = profileName; + this.reservedBytes = reservedBytes; + this.channelType = channelType; + } + + @Override + public final String getProfileName() { + return profileName; + } + + @Override + public final String action() { + return this.action; + } + + @Override + public final void sendResponse(TransportResponse response) throws IOException { + 
sendResponse(response, TransportResponseOptions.EMPTY); + } + + @Override + public final void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException { + release(); + transport.sendResponse(version, channel, response, requestId, action, options); + + } + + @Override + public void sendResponse(Throwable error) throws IOException { + release(); + transport.sendErrorResponse(version, channel, error, requestId, action); + } + + private void release() { + // attempt to release once atomically + if (released.compareAndSet(false, true) == false) { + throw new IllegalStateException("reserved bytes are already released"); + } + transport.getInFlightRequestBreaker().addWithoutBreaking(-reservedBytes); + } + + @Override + public final long getRequestId() { + return requestId; + } + + @Override + public final String getChannelType() { + return channelType; + } + + public Channel getChannel() { + return channel; + } + +} + diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java index 76793d5598f..754d2105e83 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transport.java +++ b/core/src/main/java/org/elasticsearch/transport/Transport.java @@ -20,6 +20,8 @@ package org.elasticsearch.transport; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -94,4 +96,9 @@ public interface Transport extends LifecycleComponent { long serverOpen(); List getLocalAddresses(); + + default CircuitBreaker getInFlightRequestBreaker() { + return new NoopCircuitBreaker("in-flight-noop"); + } + } diff --git a/core/src/main/java/org/elasticsearch/transport/Transports.java 
b/core/src/main/java/org/elasticsearch/transport/Transports.java index 68d828fc72f..1186c821309 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transports.java +++ b/core/src/main/java/org/elasticsearch/transport/Transports.java @@ -20,7 +20,6 @@ package org.elasticsearch.transport; import org.elasticsearch.transport.local.LocalTransport; -import org.elasticsearch.transport.netty.NettyTransport; import java.util.Arrays; @@ -39,10 +38,10 @@ public enum Transports { final String threadName = t.getName(); for (String s : Arrays.asList( LocalTransport.LOCAL_TRANSPORT_THREAD_NAME_PREFIX, - NettyTransport.HTTP_SERVER_BOSS_THREAD_NAME_PREFIX, - NettyTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, - NettyTransport.TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX, - NettyTransport.TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX, + TcpTransport.HTTP_SERVER_BOSS_THREAD_NAME_PREFIX, + TcpTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, + TcpTransport.TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX, + TcpTransport.TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX, TEST_MOCK_TRANSPORT_THREAD_PREFIX)) { if (threadName.contains(s)) { return true; diff --git a/core/src/main/java/org/elasticsearch/common/netty/ChannelBufferBytesReference.java b/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferBytesReference.java similarity index 85% rename from core/src/main/java/org/elasticsearch/common/netty/ChannelBufferBytesReference.java rename to core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferBytesReference.java index 42cfe3c611f..60dbdd21bd1 100644 --- a/core/src/main/java/org/elasticsearch/common/netty/ChannelBufferBytesReference.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferBytesReference.java @@ -16,13 +16,12 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.common.netty; +package org.elasticsearch.transport.netty; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.transport.netty.ChannelBufferStreamInputFactory; import org.jboss.netty.buffer.ChannelBuffer; import java.io.IOException; @@ -34,9 +33,12 @@ import java.nio.charset.StandardCharsets; final class ChannelBufferBytesReference implements BytesReference { private final ChannelBuffer buffer; + private final int size; - ChannelBufferBytesReference(ChannelBuffer buffer) { + ChannelBufferBytesReference(ChannelBuffer buffer, int size) { this.buffer = buffer; + this.size = size; + assert size <= buffer.readableBytes() : "size[" + size +"] > " + buffer.readableBytes(); } @Override @@ -46,25 +48,24 @@ final class ChannelBufferBytesReference implements BytesReference { @Override public int length() { - return buffer.readableBytes(); + return size; } @Override public BytesReference slice(int from, int length) { - return new ChannelBufferBytesReference(buffer.slice(buffer.readerIndex() + from, length)); + return new ChannelBufferBytesReference(buffer.slice(buffer.readerIndex() + from, length), length); } @Override public StreamInput streamInput() { - return ChannelBufferStreamInputFactory.create(buffer.duplicate()); + return new ChannelBufferStreamInput(buffer.duplicate(), size); } @Override public void writeTo(OutputStream os) throws IOException { - buffer.getBytes(buffer.readerIndex(), os, length()); + buffer.getBytes(buffer.readerIndex(), os, size); } - @Override public byte[] toBytes() { return copyBytesArray().toBytes(); } @@ -72,7 +73,7 @@ final class ChannelBufferBytesReference implements BytesReference { @Override public BytesArray toBytesArray() { if (buffer.hasArray()) { - return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), 
buffer.readableBytes()); + return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), size); } return copyBytesArray(); } @@ -111,7 +112,7 @@ final class ChannelBufferBytesReference implements BytesReference { @Override public BytesRef toBytesRef() { if (buffer.hasArray()) { - return new BytesRef(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), buffer.readableBytes()); + return new BytesRef(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), size); } byte[] copy = new byte[buffer.readableBytes()]; buffer.getBytes(buffer.readerIndex(), copy); @@ -120,7 +121,7 @@ final class ChannelBufferBytesReference implements BytesReference { @Override public BytesRef copyBytesRef() { - byte[] copy = new byte[buffer.readableBytes()]; + byte[] copy = new byte[size]; buffer.getBytes(buffer.readerIndex(), copy); return new BytesRef(copy); } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java b/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java index 9e2b43d43db..3b95ddd74c7 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java @@ -22,16 +22,14 @@ package org.elasticsearch.transport.netty; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.netty.NettyUtils; import org.jboss.netty.buffer.ChannelBuffer; -import java.io.EOFException; import java.io.IOException; /** * A Netty {@link org.jboss.netty.buffer.ChannelBuffer} based {@link org.elasticsearch.common.io.stream.StreamInput}. 
*/ -public class ChannelBufferStreamInput extends StreamInput { +class ChannelBufferStreamInput extends StreamInput { private final ChannelBuffer buffer; private final int startIndex; diff --git a/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInputFactory.java b/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInputFactory.java deleted file mode 100644 index 554f710ec4d..00000000000 --- a/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInputFactory.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.transport.netty; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.jboss.netty.buffer.ChannelBuffer; - -/** - */ -public class ChannelBufferStreamInputFactory { - - public static StreamInput create(ChannelBuffer buffer) { - return new ChannelBufferStreamInput(buffer, buffer.readableBytes()); - } - - public static StreamInput create(ChannelBuffer buffer, int size) { - return new ChannelBufferStreamInput(buffer, size); - } -} diff --git a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java deleted file mode 100644 index 3274aa7b975..00000000000 --- a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java +++ /dev/null @@ -1,437 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.transport.netty; - -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.Version; -import org.elasticsearch.common.component.Lifecycle; -import org.elasticsearch.common.compress.Compressor; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.compress.NotCompressedException; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.netty.NettyUtils; -import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ActionNotFoundTransportException; -import org.elasticsearch.transport.RemoteTransportException; -import org.elasticsearch.transport.RequestHandlerRegistry; -import org.elasticsearch.transport.ResponseHandlerFailureTransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportSerializationException; -import org.elasticsearch.transport.TransportServiceAdapter; -import org.elasticsearch.transport.Transports; -import org.elasticsearch.transport.support.TransportStatus; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.ChannelHandlerContext; -import org.jboss.netty.channel.ExceptionEvent; -import org.jboss.netty.channel.MessageEvent; -import org.jboss.netty.channel.SimpleChannelUpstreamHandler; -import org.jboss.netty.channel.WriteCompletionEvent; - -import java.io.IOException; -import java.net.InetSocketAddress; - -/** - * A handler (must be the last one!) 
that does size based frame decoding and forwards the actual message - * to the relevant action. - */ -public class MessageChannelHandler extends SimpleChannelUpstreamHandler { - - protected final ESLogger logger; - protected final ThreadPool threadPool; - protected final TransportServiceAdapter transportServiceAdapter; - protected final NettyTransport transport; - protected final String profileName; - private final ThreadContext threadContext; - - public MessageChannelHandler(NettyTransport transport, ESLogger logger, String profileName) { - this.threadPool = transport.threadPool(); - this.threadContext = threadPool.getThreadContext(); - this.transportServiceAdapter = transport.transportServiceAdapter(); - this.transport = transport; - this.logger = logger; - this.profileName = profileName; - } - - @Override - public void writeComplete(ChannelHandlerContext ctx, WriteCompletionEvent e) throws Exception { - transportServiceAdapter.sent(e.getWrittenAmount()); - super.writeComplete(ctx, e); - } - - @Override - public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception { - Transports.assertTransportThread(); - Object m = e.getMessage(); - if (!(m instanceof ChannelBuffer)) { - ctx.sendUpstream(e); - return; - } - ChannelBuffer buffer = (ChannelBuffer) m; - Marker marker = new Marker(buffer); - int size = marker.messageSizeWithRemainingHeaders(); - transportServiceAdapter.received(marker.messageSizeWithAllHeaders()); - - // we have additional bytes to read, outside of the header - boolean hasMessageBytesToRead = marker.messageSize() != 0; - - // netty always copies a buffer, either in NioWorker in its read handler, where it copies to a fresh - // buffer, or in the cumulation buffer, which is cleaned each time - StreamInput streamIn = ChannelBufferStreamInputFactory.create(buffer, size); - boolean success = false; - try (ThreadContext.StoredContext tCtx = threadContext.stashContext()) { - long requestId = streamIn.readLong(); - byte status = 
streamIn.readByte(); - Version version = Version.fromId(streamIn.readInt()); - - if (TransportStatus.isCompress(status) && hasMessageBytesToRead && buffer.readable()) { - Compressor compressor; - try { - compressor = CompressorFactory.compressor(NettyUtils.toBytesReference(buffer)); - } catch (NotCompressedException ex) { - int maxToRead = Math.min(buffer.readableBytes(), 10); - int offset = buffer.readerIndex(); - StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead) - .append("] content bytes out of [").append(buffer.readableBytes()) - .append("] readable bytes with message size [").append(size).append("] ").append("] are ["); - for (int i = 0; i < maxToRead; i++) { - sb.append(buffer.getByte(offset + i)).append(","); - } - sb.append("]"); - throw new IllegalStateException(sb.toString()); - } - streamIn = compressor.streamInput(streamIn); - } - if (version.onOrAfter(Version.CURRENT.minimumCompatibilityVersion()) == false || version.major != Version.CURRENT.major) { - throw new IllegalStateException("Received message from unsupported version: [" + version - + "] minimal compatible version is: [" +Version.CURRENT.minimumCompatibilityVersion() + "]"); - } - streamIn.setVersion(version); - if (TransportStatus.isRequest(status)) { - threadContext.readHeaders(streamIn); - handleRequest(ctx.getChannel(), marker, streamIn, requestId, size, version); - } else { - TransportResponseHandler handler = transportServiceAdapter.onResponseReceived(requestId); - // ignore if its null, the adapter logs it - if (handler != null) { - if (TransportStatus.isError(status)) { - handlerResponseError(streamIn, handler); - } else { - handleResponse(ctx.getChannel(), streamIn, handler); - } - marker.validateResponse(streamIn, requestId, handler, TransportStatus.isError(status)); - } - } - success = true; - } finally { - try { - if (success) { - IOUtils.close(streamIn); - } else { - 
IOUtils.closeWhileHandlingException(streamIn); - } - } finally { - // Set the expected position of the buffer, no matter what happened - buffer.readerIndex(marker.expectedReaderIndex()); - } - } - } - - protected void handleResponse(Channel channel, StreamInput buffer, final TransportResponseHandler handler) { - buffer = new NamedWriteableAwareStreamInput(buffer, transport.namedWriteableRegistry); - final TransportResponse response = handler.newInstance(); - response.remoteAddress(new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress())); - response.remoteAddress(); - try { - response.readFrom(buffer); - } catch (Throwable e) { - handleException(handler, new TransportSerializationException( - "Failed to deserialize response of type [" + response.getClass().getName() + "]", e)); - return; - } - try { - if (ThreadPool.Names.SAME.equals(handler.executor())) { - //noinspection unchecked - handler.handleResponse(response); - } else { - threadPool.executor(handler.executor()).execute(new ResponseHandler(handler, response)); - } - } catch (Throwable e) { - handleException(handler, new ResponseHandlerFailureTransportException(e)); - } - } - - private void handlerResponseError(StreamInput buffer, final TransportResponseHandler handler) { - Throwable error; - try { - error = buffer.readThrowable(); - } catch (Throwable e) { - error = new TransportSerializationException("Failed to deserialize exception response from stream", e); - } - handleException(handler, error); - } - - private void handleException(final TransportResponseHandler handler, Throwable error) { - if (!(error instanceof RemoteTransportException)) { - error = new RemoteTransportException(error.getMessage(), error); - } - final RemoteTransportException rtx = (RemoteTransportException) error; - if (ThreadPool.Names.SAME.equals(handler.executor())) { - try { - handler.handleException(rtx); - } catch (Throwable e) { - logger.error("failed to handle exception response [{}]", e, handler); - } - 
} else { - threadPool.executor(handler.executor()).execute(new Runnable() { - @Override - public void run() { - try { - handler.handleException(rtx); - } catch (Throwable e) { - logger.error("failed to handle exception response [{}]", e, handler); - } - } - }); - } - } - - protected String handleRequest(Channel channel, Marker marker, StreamInput buffer, long requestId, int messageLengthBytes, - Version version) throws IOException { - buffer = new NamedWriteableAwareStreamInput(buffer, transport.namedWriteableRegistry); - final String action = buffer.readString(); - transportServiceAdapter.onRequestReceived(requestId, action); - NettyTransportChannel transportChannel = null; - try { - final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); - if (reg == null) { - throw new ActionNotFoundTransportException(action); - } - if (reg.canTripCircuitBreaker()) { - transport.inFlightRequestsBreaker().addEstimateBytesAndMaybeBreak(messageLengthBytes, ""); - } else { - transport.inFlightRequestsBreaker().addWithoutBreaking(messageLengthBytes); - } - transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel, - requestId, version, profileName, messageLengthBytes); - final TransportRequest request = reg.newRequest(); - request.remoteAddress(new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress())); - request.readFrom(buffer); - // in case we throw an exception, i.e. 
when the limit is hit, we don't want to verify - validateRequest(marker, buffer, requestId, action); - if (ThreadPool.Names.SAME.equals(reg.getExecutor())) { - //noinspection unchecked - reg.processMessageReceived(request, transportChannel); - } else { - threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); - } - } catch (Throwable e) { - // the circuit breaker tripped - if (transportChannel == null) { - transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel, - requestId, version, profileName, 0); - } - try { - transportChannel.sendResponse(e); - } catch (IOException e1) { - logger.warn("Failed to send error message back to client for action [{}]", e, action); - logger.warn("Actual Exception", e1); - } - } - return action; - } - - // This template method is needed to inject custom error checking logic in tests. - protected void validateRequest(Marker marker, StreamInput buffer, long requestId, String action) throws IOException { - marker.validateRequest(buffer, requestId, action); - } - - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception { - transport.exceptionCaught(ctx, e); - } - - class ResponseHandler implements Runnable { - - private final TransportResponseHandler handler; - private final TransportResponse response; - - public ResponseHandler(TransportResponseHandler handler, TransportResponse response) { - this.handler = handler; - this.response = response; - } - - @SuppressWarnings({"unchecked"}) - @Override - public void run() { - try { - handler.handleResponse(response); - } catch (Throwable e) { - handleException(handler, new ResponseHandlerFailureTransportException(e)); - } - } - } - - class RequestHandler extends AbstractRunnable { - private final RequestHandlerRegistry reg; - private final TransportRequest request; - private final NettyTransportChannel transportChannel; - - public 
RequestHandler(RequestHandlerRegistry reg, TransportRequest request, NettyTransportChannel transportChannel) { - this.reg = reg; - this.request = request; - this.transportChannel = transportChannel; - } - - @SuppressWarnings({"unchecked"}) - @Override - protected void doRun() throws Exception { - reg.processMessageReceived(request, transportChannel); - } - - @Override - public boolean isForceExecution() { - return reg.isForceExecution(); - } - - @Override - public void onFailure(Throwable e) { - if (transport.lifecycleState() == Lifecycle.State.STARTED) { - // we can only send a response transport is started.... - try { - transportChannel.sendResponse(e); - } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [{}]", e1, reg.getAction()); - logger.warn("Actual Exception", e); - } - } - } - } - - /** - * Internal helper class to store characteristic offsets of a buffer during processing - */ - protected static final class Marker { - private final ChannelBuffer buffer; - private final int remainingMessageSize; - private final int expectedReaderIndex; - - public Marker(ChannelBuffer buffer) { - this.buffer = buffer; - // when this constructor is called, we have read already two parts of the message header: the marker bytes and the message - // message length (see SizeHeaderFrameDecoder). Hence we have to rewind the index for MESSAGE_LENGTH_SIZE bytes to read the - // remaining message length again. 
- this.remainingMessageSize = buffer.getInt(buffer.readerIndex() - NettyHeader.MESSAGE_LENGTH_SIZE); - this.expectedReaderIndex = buffer.readerIndex() + remainingMessageSize; - } - - /** - * @return the number of bytes that have yet to be read from the buffer - */ - public int messageSizeWithRemainingHeaders() { - return remainingMessageSize; - } - - /** - * @return the number in bytes for the message including all headers (even the ones that have been read from the buffer already) - */ - public int messageSizeWithAllHeaders() { - return remainingMessageSize + NettyHeader.MARKER_BYTES_SIZE + NettyHeader.MESSAGE_LENGTH_SIZE; - } - - /** - * @return the number of bytes for the message itself (excluding all headers). - */ - public int messageSize() { - return messageSizeWithAllHeaders() - NettyHeader.HEADER_SIZE; - } - - /** - * @return the expected index of the buffer's reader after the message has been consumed entirely. - */ - public int expectedReaderIndex() { - return expectedReaderIndex; - } - - /** - * Validates that a request has been fully read (not too few bytes but also not too many bytes). - * - * @param stream A stream that is associated with the buffer that is tracked by this marker. - * @param requestId The current request id. - * @param action The currently executed action. - * @throws IOException Iff the stream could not be read. - * @throws IllegalStateException Iff the request has not been fully read. 
- */ - public void validateRequest(StreamInput stream, long requestId, String action) throws IOException { - final int nextByte = stream.read(); - // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker - if (nextByte != -1) { - throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action [" + action - + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedReaderIndex + "]; resetting"); - } - if (buffer.readerIndex() < expectedReaderIndex) { - throw new IllegalStateException("Message is fully read (request), yet there are " - + (expectedReaderIndex - buffer.readerIndex()) + " remaining bytes; resetting"); - } - if (buffer.readerIndex() > expectedReaderIndex) { - throw new IllegalStateException( - "Message read past expected size (request) for requestId [" + requestId + "], action [" + action - + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedReaderIndex + "]; resetting"); - } - } - - /** - * Validates that a response has been fully read (not too few bytes but also not too many bytes). - * - * @param stream A stream that is associated with the buffer that is tracked by this marker. - * @param requestId The corresponding request id for this response. - * @param handler The current response handler. - * @param error Whether validate an error response. - * @throws IOException Iff the stream could not be read. - * @throws IllegalStateException Iff the request has not been fully read. 
- */ - public void validateResponse(StreamInput stream, long requestId, - TransportResponseHandler handler, boolean error) throws IOException { - // Check the entire message has been read - final int nextByte = stream.read(); - // calling read() is useful to make sure the message is fully read, even if there is an EOS marker - if (nextByte != -1) { - throw new IllegalStateException("Message not fully read (response) for requestId [" + requestId + "], handler [" - + handler + "], error [" + error + "]; resetting"); - } - if (buffer.readerIndex() < expectedReaderIndex) { - throw new IllegalStateException("Message is fully read (response), yet there are " - + (expectedReaderIndex - buffer.readerIndex()) + " remaining bytes; resetting"); - } - if (buffer.readerIndex() > expectedReaderIndex) { - throw new IllegalStateException("Message read past expected size (response) for requestId [" + requestId - + "], handler [" + handler + "], error [" + error + "]; resetting"); - } - } - } -} diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyHeader.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyHeader.java deleted file mode 100644 index 8e4423fb447..00000000000 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyHeader.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport.netty; - -import org.elasticsearch.Version; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; - -/** - */ -public class NettyHeader { - public static final int MARKER_BYTES_SIZE = 2 * 1; - - public static final int MESSAGE_LENGTH_SIZE = 4; - - public static final int REQUEST_ID_SIZE = 8; - - public static final int STATUS_SIZE = 1; - - public static final int VERSION_ID_SIZE = 4; - - public static final int HEADER_SIZE = MARKER_BYTES_SIZE + MESSAGE_LENGTH_SIZE + REQUEST_ID_SIZE + STATUS_SIZE + VERSION_ID_SIZE; - - /** - * The magic number (must be lower than 0) for a ping message. This is handled - * specifically in {@link org.elasticsearch.transport.netty.SizeHeaderFrameDecoder}. - */ - public static final int PING_DATA_SIZE = -1; - private final static ChannelBuffer pingHeader; - static { - pingHeader = ChannelBuffers.buffer(6); - pingHeader.writeByte('E'); - pingHeader.writeByte('S'); - pingHeader.writeInt(PING_DATA_SIZE); - } - - /** - * A ping header is same as regular header, just with -1 for the size of the message. 
- */ - public static ChannelBuffer pingHeader() { - return pingHeader.duplicate(); - } - - public static void writeHeader(ChannelBuffer buffer, long requestId, byte status, Version version) { - int index = buffer.readerIndex(); - buffer.setByte(index, 'E'); - index += 1; - buffer.setByte(index, 'S'); - index += 1; - // write the size, the size indicates the remaining message size, not including the size int - buffer.setInt(index, buffer.readableBytes() - MARKER_BYTES_SIZE - MESSAGE_LENGTH_SIZE); - index += MESSAGE_LENGTH_SIZE; - buffer.setLong(index, requestId); - index += REQUEST_ID_SIZE; - buffer.setByte(index, status); - index += STATUS_SIZE; - buffer.setInt(index, version.id); - } -} diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java index 2a1fc3226a4..d8307f32244 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java @@ -27,11 +27,11 @@ import org.jboss.netty.logging.AbstractInternalLogger; * */ @SuppressLoggerChecks(reason = "safely delegates to logger") -public class NettyInternalESLogger extends AbstractInternalLogger { +final class NettyInternalESLogger extends AbstractInternalLogger { private final ESLogger logger; - public NettyInternalESLogger(ESLogger logger) { + NettyInternalESLogger(ESLogger logger) { this.logger = logger; } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLoggerFactory.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLoggerFactory.java deleted file mode 100644 index 70c5e651c2c..00000000000 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLoggerFactory.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport.netty; - -import org.elasticsearch.common.logging.Loggers; -import org.jboss.netty.logging.InternalLogger; -import org.jboss.netty.logging.InternalLoggerFactory; - -/** - * - */ -public class NettyInternalESLoggerFactory extends InternalLoggerFactory { - - @Override - public InternalLogger newInstance(String name) { - return new NettyInternalESLogger(Loggers.getLogger(name)); - } -} diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyMessageChannelHandler.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyMessageChannelHandler.java new file mode 100644 index 00000000000..0f2805459c9 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyMessageChannelHandler.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.netty; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.transport.TcpHeader; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.TcpTransportChannel; +import org.elasticsearch.transport.TransportServiceAdapter; +import org.elasticsearch.transport.Transports; +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.channel.ExceptionEvent; +import org.jboss.netty.channel.MessageEvent; +import org.jboss.netty.channel.SimpleChannelUpstreamHandler; +import org.jboss.netty.channel.WriteCompletionEvent; + +import java.net.InetSocketAddress; + +/** + * A handler (must be the last one!) that does size based frame decoding and forwards the actual message + * to the relevant action. 
+ */ +class NettyMessageChannelHandler extends SimpleChannelUpstreamHandler { + + protected final TransportServiceAdapter transportServiceAdapter; + protected final NettyTransport transport; + protected final String profileName; + + NettyMessageChannelHandler(NettyTransport transport, String profileName) { + this.transportServiceAdapter = transport.transportServiceAdapter(); + this.transport = transport; + this.profileName = profileName; + } + + @Override + public void writeComplete(ChannelHandlerContext ctx, WriteCompletionEvent e) throws Exception { + transportServiceAdapter.sent(e.getWrittenAmount()); + super.writeComplete(ctx, e); + } + + @Override + public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception { + Transports.assertTransportThread(); + Object m = e.getMessage(); + if (!(m instanceof ChannelBuffer)) { + ctx.sendUpstream(e); + return; + } + final ChannelBuffer buffer = (ChannelBuffer) m; + final int remainingMessageSize = buffer.getInt(buffer.readerIndex() - TcpHeader.MESSAGE_LENGTH_SIZE); + final int expectedReaderIndex = buffer.readerIndex() + remainingMessageSize; + InetSocketAddress remoteAddress = (InetSocketAddress) ctx.getChannel().getRemoteAddress(); + try { + // netty always copies a buffer, either in NioWorker in its read handler, where it copies to a fresh + // buffer, or in the cumulation buffer, which is cleaned each time so it could be bigger than the actual size + BytesReference reference = NettyUtils.toBytesReference(buffer, remainingMessageSize); + transport.messageReceived(reference, ctx.getChannel(), profileName, remoteAddress, remainingMessageSize); + } finally { + // Set the expected position of the buffer, no matter what happened + buffer.readerIndex(expectedReaderIndex); + } + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception { + transport.exceptionCaught(ctx, e); + } +} diff --git 
a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index be1305244bc..3c75f68eeb0 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -19,58 +19,36 @@ package org.elasticsearch.transport.netty; -import com.carrotsearch.hppc.IntHashSet; -import com.carrotsearch.hppc.IntSet; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.ReleasableBytesStream; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.metrics.CounterMetric; -import org.elasticsearch.common.netty.NettyUtils; -import org.elasticsearch.common.netty.OpenChannelsHandler; -import org.elasticsearch.common.netty.ReleaseChannelFutureListener; -import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkService.TcpSettings; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; 
import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.elasticsearch.common.transport.PortsRange; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.AbstractLifecycleRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.BytesTransportRequest; import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.NodeNotConnectedException; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.TransportMessage; import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportServiceAdapter; import org.elasticsearch.transport.TransportSettings; -import org.elasticsearch.transport.support.TransportStatus; import org.jboss.netty.bootstrap.ClientBootstrap; import org.jboss.netty.bootstrap.ServerBootstrap; import org.jboss.netty.buffer.ChannelBuffer; @@ -94,42 +72,17 @@ import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory; import org.jboss.netty.util.HashedWheelTimer; import java.io.IOException; -import java.net.BindException; -import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.SocketAddress; -import 
java.net.UnknownHostException; -import java.nio.channels.CancelledKeyException; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.common.settings.Setting.boolSetting; import static org.elasticsearch.common.settings.Setting.byteSizeSetting; import static org.elasticsearch.common.settings.Setting.intSetting; -import static org.elasticsearch.common.settings.Setting.timeSetting; -import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException; -import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; @@ -139,51 +92,16 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF * longer. Med is for the typical search / single doc index. And High for things like cluster state. Ping is reserved for * sending out ping requests to other nodes. 
*/ -public class NettyTransport extends AbstractLifecycleComponent implements Transport { +public class NettyTransport extends TcpTransport { static { NettyUtils.setup(); } - public static final String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker"; - public static final String HTTP_SERVER_BOSS_THREAD_NAME_PREFIX = "http_server_boss"; - public static final String TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX = "transport_client_worker"; - public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss"; - public static final Setting WORKER_COUNT = new Setting<>("transport.netty.worker_count", (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_RECOVERY = - intSetting("transport.connections_per_node.recovery", 2, 1, Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_BULK = - intSetting("transport.connections_per_node.bulk", 3, 1, Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_REG = - intSetting("transport.connections_per_node.reg", 6, 1, Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_STATE = - intSetting("transport.connections_per_node.state", 1, 1, Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_PING = - intSetting("transport.connections_per_node.ping", 1, 1, Property.NodeScope); - // the scheduled internal ping interval setting, defaults to disabled (-1) - public static final Setting PING_SCHEDULE = - timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), Property.NodeScope); - public static final Setting TCP_BLOCKING_CLIENT = - boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT, Property.NodeScope); - public static final Setting TCP_CONNECT_TIMEOUT = - timeSetting("transport.tcp.connect_timeout", TcpSettings.TCP_CONNECT_TIMEOUT, 
Property.NodeScope); - public static final Setting TCP_NO_DELAY = - boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, Property.NodeScope); - public static final Setting TCP_KEEP_ALIVE = - boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, Property.NodeScope); - public static final Setting TCP_BLOCKING_SERVER = - boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER, Property.NodeScope); - public static final Setting TCP_REUSE_ADDRESS = - boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS, Property.NodeScope); - - public static final Setting TCP_SEND_BUFFER_SIZE = - Setting.byteSizeSetting("transport.tcp.send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE, Property.NodeScope); - public static final Setting TCP_RECEIVE_BUFFER_SIZE = - Setting.byteSizeSetting("transport.tcp.receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE, Property.NodeScope); public static final Setting NETTY_MAX_CUMULATION_BUFFER_CAPACITY = Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), Property.NodeScope); @@ -192,16 +110,16 @@ public class NettyTransport extends AbstractLifecycleComponent implem // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one public static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( - "transport.netty.receive_predictor_size", - settings -> { - long defaultReceiverPredictor = 512 * 1024; - if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) { - // we can guess a better default... 
- long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / WORKER_COUNT.get(settings)); - defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024)); - } - return new ByteSizeValue(defaultReceiverPredictor).toString(); - }, Property.NodeScope); + "transport.netty.receive_predictor_size", + settings -> { + long defaultReceiverPredictor = 512 * 1024; + if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) { + // we can guess a better default... + long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / WORKER_COUNT.get(settings)); + defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024)); + } + return new ByteSizeValue(defaultReceiverPredictor).toString(); + }, Property.NodeScope); public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = @@ -209,69 +127,25 @@ public class NettyTransport extends AbstractLifecycleComponent implem public static final Setting NETTY_BOSS_COUNT = intSetting("transport.netty.boss_count", 1, 1, Property.NodeScope); - protected final NetworkService networkService; - protected final boolean blockingClient; - protected final TimeValue connectTimeout; protected final ByteSizeValue maxCumulationBufferCapacity; protected final int maxCompositeBufferComponents; - protected final boolean compress; protected final ReceiveBufferSizePredictorFactory receiveBufferSizePredictorFactory; protected final int workerCount; protected final ByteSizeValue receivePredictorMin; protected final ByteSizeValue receivePredictorMax; - - protected final int connectionsPerNodeRecovery; - protected final int connectionsPerNodeBulk; - protected final int connectionsPerNodeReg; - protected final int connectionsPerNodeState; - protected final int connectionsPerNodePing; - - private final 
TimeValue pingSchedule; - - protected final BigArrays bigArrays; - protected final ThreadPool threadPool; // package private for testing volatile OpenChannelsHandler serverOpenChannels; protected volatile ClientBootstrap clientBootstrap; - // node id to actual channel - protected final ConcurrentMap connectedNodes = newConcurrentMap(); protected final Map serverBootstraps = newConcurrentMap(); - protected final Map> serverChannels = newConcurrentMap(); - protected final ConcurrentMap profileBoundAddresses = newConcurrentMap(); - protected volatile TransportServiceAdapter transportServiceAdapter; - protected volatile BoundTransportAddress boundAddress; - protected final KeyedLock connectionLock = new KeyedLock<>(); - protected final NamedWriteableRegistry namedWriteableRegistry; - private final CircuitBreakerService circuitBreakerService; - - // this lock is here to make sure we close this transport and disconnect all the client nodes - // connections while no connect operations is going on... (this might help with 100% CPU when stopping the transport?) 
- private final ReadWriteLock globalLock = new ReentrantReadWriteLock(); - - // package visibility for tests - final ScheduledPing scheduledPing; @Inject public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { - super(settings); - this.threadPool = threadPool; - this.networkService = networkService; - this.bigArrays = bigArrays; - + super("netty", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); this.workerCount = WORKER_COUNT.get(settings); - this.blockingClient = TCP_BLOCKING_CLIENT.get(settings); - this.connectTimeout = TCP_CONNECT_TIMEOUT.get(settings); this.maxCumulationBufferCapacity = NETTY_MAX_CUMULATION_BUFFER_CAPACITY.get(settings); this.maxCompositeBufferComponents = NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); - this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings); - - this.connectionsPerNodeRecovery = CONNECTIONS_PER_NODE_RECOVERY.get(settings); - this.connectionsPerNodeBulk = CONNECTIONS_PER_NODE_BULK.get(settings); - this.connectionsPerNodeReg = CONNECTIONS_PER_NODE_REG.get(settings); - this.connectionsPerNodeState = CONNECTIONS_PER_NODE_STATE.get(settings); - this.connectionsPerNodePing = CONNECTIONS_PER_NODE_PING.get(settings); // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one this.receivePredictorMin = NETTY_RECEIVE_PREDICTOR_MIN.get(settings); @@ -280,37 +154,14 @@ public class NettyTransport extends AbstractLifecycleComponent implem receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes()); } else { receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), - (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes()); + (int) 
receivePredictorMin.bytes(), (int) receivePredictorMax.bytes()); } - - this.scheduledPing = new ScheduledPing(); - this.pingSchedule = PING_SCHEDULE.get(settings); - this.namedWriteableRegistry = namedWriteableRegistry; - this.circuitBreakerService = circuitBreakerService; - } - - public Settings settings() { - return this.settings; - } - - @Override - public void transportServiceAdapter(TransportServiceAdapter service) { - this.transportServiceAdapter = service; } TransportServiceAdapter transportServiceAdapter() { return transportServiceAdapter; } - ThreadPool threadPool() { - return threadPool; - } - - CircuitBreaker inFlightRequestsBreaker() { - // We always obtain a fresh breaker to reflect changes to the breaker configuration. - return circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); - } - @Override protected void doStart() { boolean success = false; @@ -319,51 +170,17 @@ public class NettyTransport extends AbstractLifecycleComponent implem if (NetworkService.NETWORK_SERVER.get(settings)) { final OpenChannelsHandler openChannels = new OpenChannelsHandler(logger); this.serverOpenChannels = openChannels; - - // extract default profile first and create standard bootstrap - Map profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings()).getAsGroups(true); - if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) { - profiles = new HashMap<>(profiles); - profiles.put(TransportSettings.DEFAULT_PROFILE, Settings.EMPTY); - } - - Settings fallbackSettings = createFallbackSettings(); - Settings defaultSettings = profiles.get(TransportSettings.DEFAULT_PROFILE); - // loop through all profiles and start them up, special handling for default one - for (Map.Entry entry : profiles.entrySet()) { - Settings profileSettings = entry.getValue(); - String name = entry.getKey(); - - if (!Strings.hasLength(name)) { - logger.info("transport profile configured without a name. 
skipping profile with settings [{}]", - profileSettings.toDelimitedString(',')); - continue; - } else if (TransportSettings.DEFAULT_PROFILE.equals(name)) { - profileSettings = Settings.builder() - .put(profileSettings) - .put("port", profileSettings.get("port", TransportSettings.PORT.get(this.settings))) - .build(); - } else if (profileSettings.get("port") == null) { - // if profile does not have a port, skip it - logger.info("No port configured for profile [{}], not binding", name); - continue; - } - + for (Map.Entry entry : buildProfileSettings().entrySet()) { // merge fallback settings with default settings with profile settings so we have complete settings with default values - Settings mergedSettings = Settings.builder() - .put(fallbackSettings) - .put(defaultSettings) - .put(profileSettings) - .build(); - - createServerBootstrap(name, mergedSettings); - bindServerBootstrap(name, mergedSettings); + final Settings settings = Settings.builder() + .put(createFallbackSettings()) + .put(entry.getValue()).build(); + createServerBootstrap(entry.getKey(), settings); + bindServer(entry.getKey(), settings); } } - if (pingSchedule.millis() > 0) { - threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, scheduledPing); - } + super.doStart(); success = true; } finally { if (success == false) { @@ -372,24 +189,18 @@ public class NettyTransport extends AbstractLifecycleComponent implem } } - @Override - public Map profileBoundAddresses() { - return unmodifiableMap(new HashMap<>(profileBoundAddresses)); - } - private ClientBootstrap createClientBootstrap() { - if (blockingClient) { clientBootstrap = new ClientBootstrap(new OioClientSocketChannelFactory( - Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)))); + Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)))); } else { int bossCount = NETTY_BOSS_COUNT.get(settings); clientBootstrap = new ClientBootstrap( - new 
NioClientSocketChannelFactory( - Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)), - bossCount, - new NioWorkerPool(Executors.newCachedThreadPool( - daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)), workerCount), + new NioClientSocketChannelFactory( + Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)), + bossCount, + new NioWorkerPool(Executors.newCachedThreadPool( + daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)), workerCount), new HashedWheelTimer(daemonThreadFactory(settings, "transport_client_timer")))); } clientBootstrap.setPipelineFactory(configureClientChannelPipelineFactory()); @@ -436,19 +247,19 @@ public class NettyTransport extends AbstractLifecycleComponent implem fallbackSettingsBuilder.put("tcp_no_delay", fallbackTcpNoDelay); boolean fallbackTcpKeepAlive = settings.getAsBoolean("transport.netty.tcp_keep_alive", TcpSettings.TCP_KEEP_ALIVE.get(settings)); - fallbackSettingsBuilder.put("tcp_keep_alive", fallbackTcpKeepAlive); + fallbackSettingsBuilder.put("tcp_keep_alive", fallbackTcpKeepAlive); boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TcpSettings.TCP_REUSE_ADDRESS.get(settings)); fallbackSettingsBuilder.put("reuse_address", fallbackReuseAddress); ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", - TCP_SEND_BUFFER_SIZE.get(settings)); + TCP_SEND_BUFFER_SIZE.get(settings)); if (fallbackTcpSendBufferSize.bytes() >= 0) { fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize); } ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", - TCP_RECEIVE_BUFFER_SIZE.get(settings)); + TCP_RECEIVE_BUFFER_SIZE.get(settings)); if (fallbackTcpBufferSize.bytes() >= 0) { fallbackSettingsBuilder.put("tcp_receive_buffer_size", 
fallbackTcpBufferSize); } @@ -456,146 +267,6 @@ public class NettyTransport extends AbstractLifecycleComponent implem return fallbackSettingsBuilder.build(); } - private void bindServerBootstrap(final String name, final Settings settings) { - // Bind and start to accept incoming connections. - InetAddress hostAddresses[]; - String bindHosts[] = settings.getAsArray("bind_host", null); - try { - hostAddresses = networkService.resolveBindHostAddresses(bindHosts); - } catch (IOException e) { - throw new BindTransportException("Failed to resolve host " + Arrays.toString(bindHosts) + "", e); - } - if (logger.isDebugEnabled()) { - String[] addresses = new String[hostAddresses.length]; - for (int i = 0; i < hostAddresses.length; i++) { - addresses[i] = NetworkAddress.format(hostAddresses[i]); - } - logger.debug("binding server bootstrap to: {}", (Object)addresses); - } - - assert hostAddresses.length > 0; - - List boundAddresses = new ArrayList<>(); - for (InetAddress hostAddress : hostAddresses) { - boundAddresses.add(bindToPort(name, hostAddress, settings.get("port"))); - } - - final BoundTransportAddress boundTransportAddress = createBoundTransportAddress(name, settings, boundAddresses); - - if (TransportSettings.DEFAULT_PROFILE.equals(name)) { - this.boundAddress = boundTransportAddress; - } else { - profileBoundAddresses.put(name, boundTransportAddress); - } - } - - private InetSocketAddress bindToPort(final String name, final InetAddress hostAddress, String port) { - PortsRange portsRange = new PortsRange(port); - final AtomicReference lastException = new AtomicReference<>(); - final AtomicReference boundSocket = new AtomicReference<>(); - boolean success = portsRange.iterate(new PortsRange.PortCallback() { - @Override - public boolean onPortNumber(int portNumber) { - try { - Channel channel = serverBootstraps.get(name).bind(new InetSocketAddress(hostAddress, portNumber)); - synchronized (serverChannels) { - List list = serverChannels.get(name); - if (list == null) { 
- list = new ArrayList<>(); - serverChannels.put(name, list); - } - list.add(channel); - boundSocket.set((InetSocketAddress) channel.getLocalAddress()); - } - } catch (Exception e) { - lastException.set(e); - return false; - } - return true; - } - }); - if (!success) { - throw new BindTransportException("Failed to bind to [" + port + "]", lastException.get()); - } - - if (logger.isDebugEnabled()) { - logger.debug("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get())); - } - - return boundSocket.get(); - } - - private BoundTransportAddress createBoundTransportAddress(String name, Settings profileSettings, - List boundAddresses) { - String[] boundAddressesHostStrings = new String[boundAddresses.size()]; - TransportAddress[] transportBoundAddresses = new TransportAddress[boundAddresses.size()]; - for (int i = 0; i < boundAddresses.size(); i++) { - InetSocketAddress boundAddress = boundAddresses.get(i); - boundAddressesHostStrings[i] = boundAddress.getHostString(); - transportBoundAddresses[i] = new InetSocketTransportAddress(boundAddress); - } - - final String[] publishHosts; - if (TransportSettings.DEFAULT_PROFILE.equals(name)) { - publishHosts = TransportSettings.PUBLISH_HOST.get(settings).toArray(Strings.EMPTY_ARRAY); - } else { - publishHosts = profileSettings.getAsArray("publish_host", boundAddressesHostStrings); - } - - final InetAddress publishInetAddress; - try { - publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts); - } catch (Exception e) { - throw new BindTransportException("Failed to resolve publish address", e); - } - - final int publishPort = resolvePublishPort(name, settings, profileSettings, boundAddresses, publishInetAddress); - final TransportAddress publishAddress = new InetSocketTransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); - return new BoundTransportAddress(transportBoundAddresses, publishAddress); - } - - // package private for tests - static int 
resolvePublishPort(String profileName, Settings settings, Settings profileSettings, List boundAddresses, - InetAddress publishInetAddress) { - int publishPort; - if (TransportSettings.DEFAULT_PROFILE.equals(profileName)) { - publishPort = TransportSettings.PUBLISH_PORT.get(settings); - } else { - publishPort = profileSettings.getAsInt("publish_port", -1); - } - - // if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress - if (publishPort < 0) { - for (InetSocketAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.getAddress(); - if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final IntSet ports = new IntHashSet(); - for (InetSocketAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next().value; - } - } - - if (publishPort < 0) { - String profileExplanation = TransportSettings.DEFAULT_PROFILE.equals(profileName) ? "" : " for profile " + profileName; - throw new BindTransportException("Failed to auto-resolve publish port" + profileExplanation + ", multiple bound addresses " + - boundAddresses + " with distinct ports and none of them matched the publish address (" + publishInetAddress + "). 
" + - "Please specify a unique port by setting " + TransportSettings.PORT.getKey() + " or " + - TransportSettings.PUBLISH_PORT.getKey()); - } - return publishPort; - } - private void createServerBootstrap(String name, Settings settings) { boolean blockingServer = TCP_BLOCKING_SERVER.get(settings); String port = settings.get("port"); @@ -609,10 +280,10 @@ public class NettyTransport extends AbstractLifecycleComponent implem if (logger.isDebugEnabled()) { logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], " - + "connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]", - name, workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery, - connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin, - receivePredictorMax); + + "connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]", + name, workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery, + connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin, + receivePredictorMax); } final ThreadFactory bossFactory = daemonThreadFactory(this.settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX, name); @@ -620,14 +291,14 @@ public class NettyTransport extends AbstractLifecycleComponent implem ServerBootstrap serverBootstrap; if (blockingServer) { serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory( - Executors.newCachedThreadPool(bossFactory), - Executors.newCachedThreadPool(workerFactory) + Executors.newCachedThreadPool(bossFactory), + Executors.newCachedThreadPool(workerFactory) )); } else { serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory( - Executors.newCachedThreadPool(bossFactory), - Executors.newCachedThreadPool(workerFactory), - workerCount)); + 
Executors.newCachedThreadPool(bossFactory), + Executors.newCachedThreadPool(workerFactory), + workerCount)); } serverBootstrap.setPipelineFactory(configureServerChannelPipelineFactory(name, settings)); if (!"default".equals(tcpNoDelay)) { @@ -649,202 +320,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem serverBootstraps.put(name, serverBootstrap); } - @Override - protected void doStop() { - final CountDownLatch latch = new CountDownLatch(1); - // make sure we run it on another thread than a possible IO handler thread - threadPool.generic().execute(new Runnable() { - @Override - public void run() { - globalLock.writeLock().lock(); - try { - for (Iterator it = connectedNodes.values().iterator(); it.hasNext(); ) { - NodeChannels nodeChannels = it.next(); - it.remove(); - nodeChannels.close(); - } - - Iterator>> serverChannelIterator = serverChannels.entrySet().iterator(); - while (serverChannelIterator.hasNext()) { - Map.Entry> serverChannelEntry = serverChannelIterator.next(); - String name = serverChannelEntry.getKey(); - List serverChannels = serverChannelEntry.getValue(); - for (Channel serverChannel : serverChannels) { - try { - serverChannel.close().awaitUninterruptibly(); - } catch (Throwable t) { - logger.debug("Error closing serverChannel for profile [{}]", t, name); - } - } - serverChannelIterator.remove(); - } - - if (serverOpenChannels != null) { - serverOpenChannels.close(); - serverOpenChannels = null; - } - - Iterator> serverBootstrapIterator = serverBootstraps.entrySet().iterator(); - while (serverBootstrapIterator.hasNext()) { - Map.Entry serverBootstrapEntry = serverBootstrapIterator.next(); - String name = serverBootstrapEntry.getKey(); - ServerBootstrap serverBootstrap = serverBootstrapEntry.getValue(); - - try { - serverBootstrap.releaseExternalResources(); - } catch (Throwable t) { - logger.debug("Error closing serverBootstrap for profile [{}]", t, name); - } - - serverBootstrapIterator.remove(); - } - - for (Iterator it 
= connectedNodes.values().iterator(); it.hasNext(); ) { - NodeChannels nodeChannels = it.next(); - it.remove(); - nodeChannels.close(); - } - - if (clientBootstrap != null) { - clientBootstrap.releaseExternalResources(); - clientBootstrap = null; - } - } finally { - globalLock.writeLock().unlock(); - latch.countDown(); - } - } - }); - - try { - latch.await(30, TimeUnit.SECONDS); - } catch (InterruptedException e) { - // ignore - } - } - - @Override - protected void doClose() { - } - - @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception { - return parse(address, settings.get("transport.profiles.default.port", TransportSettings.PORT.get(settings)), perAddressLimit); - } - - // this code is a take on guava's HostAndPort, like a HostAndPortRange - - // pattern for validating ipv6 bracket addresses. - // not perfect, but PortsRange should take care of any port range validation, not a regex - private static final Pattern BRACKET_PATTERN = Pattern.compile("^\\[(.*:.*)\\](?::([\\d\\-]*))?$"); - - /** parse a hostname+port range spec into its equivalent addresses */ - static TransportAddress[] parse(String hostPortString, String defaultPortRange, int perAddressLimit) throws UnknownHostException { - Objects.requireNonNull(hostPortString); - String host; - String portString = null; - - if (hostPortString.startsWith("[")) { - // Parse a bracketed host, typically an IPv6 literal. - Matcher matcher = BRACKET_PATTERN.matcher(hostPortString); - if (!matcher.matches()) { - throw new IllegalArgumentException("Invalid bracketed host/port range: " + hostPortString); - } - host = matcher.group(1); - portString = matcher.group(2); // could be null - } else { - int colonPos = hostPortString.indexOf(':'); - if (colonPos >= 0 && hostPortString.indexOf(':', colonPos + 1) == -1) { - // Exactly 1 colon. Split into host:port. 
- host = hostPortString.substring(0, colonPos); - portString = hostPortString.substring(colonPos + 1); - } else { - // 0 or 2+ colons. Bare hostname or IPv6 literal. - host = hostPortString; - // 2+ colons and not bracketed: exception - if (colonPos >= 0) { - throw new IllegalArgumentException("IPv6 addresses must be bracketed: " + hostPortString); - } - } - } - - // if port isn't specified, fill with the default - if (portString == null || portString.isEmpty()) { - portString = defaultPortRange; - } - - // generate address for each port in the range - Set addresses = new HashSet<>(Arrays.asList(InetAddress.getAllByName(host))); - List transportAddresses = new ArrayList<>(); - int[] ports = new PortsRange(portString).ports(); - int limit = Math.min(ports.length, perAddressLimit); - for (int i = 0; i < limit; i++) { - for (InetAddress address : addresses) { - transportAddresses.add(new InetSocketTransportAddress(address, ports[i])); - } - } - return transportAddresses.toArray(new TransportAddress[transportAddresses.size()]); - } - - @Override - public boolean addressSupported(Class address) { - return InetSocketTransportAddress.class.equals(address); - } - - @Override - public BoundTransportAddress boundAddress() { - return this.boundAddress; - } - - protected void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception { - if (!lifecycle.started()) { - // ignore - return; - } - if (isCloseConnectionException(e.getCause())) { - logger.trace("close connection exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), - ctx.getChannel()); - // close the channel, which will cause a node to be disconnected if relevant - ctx.getChannel().close(); - disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); - } else if (isConnectException(e.getCause())) { - logger.trace("connect exception caught on transport layer [{}]", e.getCause(), ctx.getChannel()); - // close the channel as safe measure, which will cause a node to 
be disconnected if relevant - ctx.getChannel().close(); - disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); - } else if (e.getCause() instanceof BindException) { - logger.trace("bind exception caught on transport layer [{}]", e.getCause(), ctx.getChannel()); - // close the channel as safe measure, which will cause a node to be disconnected if relevant - ctx.getChannel().close(); - disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); - } else if (e.getCause() instanceof CancelledKeyException) { - logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), - ctx.getChannel()); - // close the channel as safe measure, which will cause a node to be disconnected if relevant - ctx.getChannel().close(); - disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); - } else if (e.getCause() instanceof SizeHeaderFrameDecoder.HttpOnTransportException) { - // in case we are able to return data, serialize the exception content and sent it back to the client - if (ctx.getChannel().isOpen()) { - ChannelBuffer buffer = ChannelBuffers.wrappedBuffer(e.getCause().getMessage().getBytes(StandardCharsets.UTF_8)); - ChannelFuture channelFuture = ctx.getChannel().write(buffer); - channelFuture.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - future.getChannel().close(); - } - }); - } - } else { - logger.warn("exception caught on transport layer [{}], closing connection", e.getCause(), ctx.getChannel()); - // close the channel, which will cause a node to be disconnected if relevant - ctx.getChannel().close(); - disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); - } - } - - TransportAddress wrapAddress(SocketAddress socketAddress) { - return new InetSocketTransportAddress((InetSocketAddress) socketAddress); + protected final void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception { + 
onException(ctx.getChannel(), e.getCause()); } @Override @@ -853,153 +330,6 @@ public class NettyTransport extends AbstractLifecycleComponent implem return channels == null ? 0 : channels.numberOfOpenChannels(); } - @Override - public List getLocalAddresses() { - List local = new ArrayList<>(); - local.add("127.0.0.1"); - // check if v6 is supported, if so, v4 will also work via mapped addresses. - if (NetworkUtils.SUPPORTS_V6) { - local.add("[::1]"); // may get ports appended! - } - return local; - } - - @Override - public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, - TransportRequestOptions options) throws IOException, TransportException { - - Channel targetChannel = nodeChannel(node, options); - - if (compress) { - options = TransportRequestOptions.builder(options).withCompress(true).build(); - } - - byte status = 0; - status = TransportStatus.setRequest(status); - - ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); - boolean addedReleaseListener = false; - try { - bStream.skip(NettyHeader.HEADER_SIZE); - StreamOutput stream = bStream; - // only compress if asked, and, the request is not bytes, since then only - // the header part is compressed, and the "body" can't be extracted as compressed - if (options.compress() && (!(request instanceof BytesTransportRequest))) { - status = TransportStatus.setCompress(status); - stream = CompressorFactory.COMPRESSOR.streamOutput(stream); - } - - // we pick the smallest of the 2, to support both backward and forward compatibility - // note, this is the only place we need to do this, since from here on, we use the serialized version - // as the version to use also when the node receiving this request will send the response with - Version version = Version.smallest(getCurrentVersion(), node.getVersion()); - - stream.setVersion(version); - threadPool.getThreadContext().writeTo(stream); - stream.writeString(action); - - 
ReleasablePagedBytesReference bytes; - ChannelBuffer buffer; - // it might be nice to somehow generalize this optimization, maybe a smart "paged" bytes output - // that create paged channel buffers, but its tricky to know when to do it (where this option is - // more explicit). - if (request instanceof BytesTransportRequest) { - BytesTransportRequest bRequest = (BytesTransportRequest) request; - assert node.getVersion().equals(bRequest.version()); - bRequest.writeThin(stream); - stream.close(); - bytes = bStream.bytes(); - ChannelBuffer headerBuffer = NettyUtils.toChannelBuffer(bytes); - ChannelBuffer contentBuffer = NettyUtils.toChannelBuffer(bRequest.bytes()); - buffer = ChannelBuffers.wrappedBuffer(NettyUtils.DEFAULT_GATHERING, headerBuffer, contentBuffer); - } else { - request.writeTo(stream); - stream.close(); - bytes = bStream.bytes(); - buffer = NettyUtils.toChannelBuffer(bytes); - } - NettyHeader.writeHeader(buffer, requestId, status, version); - ChannelFuture future = targetChannel.write(buffer); - ReleaseChannelFutureListener listener = new ReleaseChannelFutureListener(bytes); - future.addListener(listener); - addedReleaseListener = true; - final TransportRequestOptions finalOptions = options; - ChannelFutureListener channelFutureListener = - f -> transportServiceAdapter.onRequestSent(node, requestId, action, request, finalOptions); - future.addListener(channelFutureListener); - } finally { - if (!addedReleaseListener) { - Releasables.close(bStream.bytes()); - } - } - } - - @Override - public boolean nodeConnected(DiscoveryNode node) { - return connectedNodes.containsKey(node); - } - - @Override - public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException { - connectToNode(node, true); - } - - @Override - public void connectToNode(DiscoveryNode node) { - connectToNode(node, false); - } - - public void connectToNode(DiscoveryNode node, boolean light) { - if (!lifecycle.started()) { - throw new IllegalStateException("can't add nodes 
to a stopped transport"); - } - if (node == null) { - throw new ConnectTransportException(null, "can't connect to a null node"); - } - globalLock.readLock().lock(); - try { - - try (Releasable ignored = connectionLock.acquire(node.getId())) { - if (!lifecycle.started()) { - throw new IllegalStateException("can't add nodes to a stopped transport"); - } - NodeChannels nodeChannels = connectedNodes.get(node); - if (nodeChannels != null) { - return; - } - try { - if (light) { - nodeChannels = connectToChannelsLight(node); - } else { - nodeChannels = new NodeChannels(new Channel[connectionsPerNodeRecovery], new Channel[connectionsPerNodeBulk], - new Channel[connectionsPerNodeReg], new Channel[connectionsPerNodeState], - new Channel[connectionsPerNodePing]); - try { - connectToChannels(nodeChannels, node); - } catch (Throwable e) { - logger.trace("failed to connect to [{}], cleaning dangling connections", e, node); - nodeChannels.close(); - throw e; - } - } - // we acquire a connection lock, so no way there is an existing connection - nodeChannels.start(); - connectedNodes.put(node, nodeChannels); - if (logger.isDebugEnabled()) { - logger.debug("connected to node [{}]", node); - } - transportServiceAdapter.raiseNodeConnected(node); - } catch (ConnectTransportException e) { - throw e; - } catch (Exception e) { - throw new ConnectTransportException(node, "general node connection failure", e); - } - } - } finally { - globalLock.readLock().unlock(); - } - } - protected NodeChannels connectToChannelsLight(DiscoveryNode node) { InetSocketAddress address = ((InetSocketTransportAddress) node.getAddress()).address(); ChannelFuture connect = clientBootstrap.connect(address); @@ -1013,176 +343,117 @@ public class NettyTransport extends AbstractLifecycleComponent implem return new NodeChannels(channels, channels, channels, channels, channels); } - protected void connectToChannels(NodeChannels nodeChannels, DiscoveryNode node) { - ChannelFuture[] connectRecovery = new 
ChannelFuture[nodeChannels.recovery.length]; - ChannelFuture[] connectBulk = new ChannelFuture[nodeChannels.bulk.length]; - ChannelFuture[] connectReg = new ChannelFuture[nodeChannels.reg.length]; - ChannelFuture[] connectState = new ChannelFuture[nodeChannels.state.length]; - ChannelFuture[] connectPing = new ChannelFuture[nodeChannels.ping.length]; - InetSocketAddress address = ((InetSocketTransportAddress) node.getAddress()).address(); - for (int i = 0; i < connectRecovery.length; i++) { - connectRecovery[i] = clientBootstrap.connect(address); - } - for (int i = 0; i < connectBulk.length; i++) { - connectBulk[i] = clientBootstrap.connect(address); - } - for (int i = 0; i < connectReg.length; i++) { - connectReg[i] = clientBootstrap.connect(address); - } - for (int i = 0; i < connectState.length; i++) { - connectState[i] = clientBootstrap.connect(address); - } - for (int i = 0; i < connectPing.length; i++) { - connectPing[i] = clientBootstrap.connect(address); - } - + protected NodeChannels connectToChannels(DiscoveryNode node) { + final NodeChannels nodeChannels = new NodeChannels(new Channel[connectionsPerNodeRecovery], new Channel[connectionsPerNodeBulk], + new Channel[connectionsPerNodeReg], new Channel[connectionsPerNodeState], + new Channel[connectionsPerNodePing]); + boolean success = false; try { + ChannelFuture[] connectRecovery = new ChannelFuture[nodeChannels.recovery.length]; + ChannelFuture[] connectBulk = new ChannelFuture[nodeChannels.bulk.length]; + ChannelFuture[] connectReg = new ChannelFuture[nodeChannels.reg.length]; + ChannelFuture[] connectState = new ChannelFuture[nodeChannels.state.length]; + ChannelFuture[] connectPing = new ChannelFuture[nodeChannels.ping.length]; + InetSocketAddress address = ((InetSocketTransportAddress) node.getAddress()).address(); for (int i = 0; i < connectRecovery.length; i++) { - connectRecovery[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); - if (!connectRecovery[i].isSuccess()) { - throw new 
ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectRecovery[i].getCause()); - } - nodeChannels.recovery[i] = connectRecovery[i].getChannel(); - nodeChannels.recovery[i].getCloseFuture().addListener(new ChannelCloseListener(node)); + connectRecovery[i] = clientBootstrap.connect(address); } - for (int i = 0; i < connectBulk.length; i++) { - connectBulk[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); - if (!connectBulk[i].isSuccess()) { - throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectBulk[i].getCause()); - } - nodeChannels.bulk[i] = connectBulk[i].getChannel(); - nodeChannels.bulk[i].getCloseFuture().addListener(new ChannelCloseListener(node)); + connectBulk[i] = clientBootstrap.connect(address); } - for (int i = 0; i < connectReg.length; i++) { - connectReg[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); - if (!connectReg[i].isSuccess()) { - throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectReg[i].getCause()); - } - nodeChannels.reg[i] = connectReg[i].getChannel(); - nodeChannels.reg[i].getCloseFuture().addListener(new ChannelCloseListener(node)); + connectReg[i] = clientBootstrap.connect(address); } - for (int i = 0; i < connectState.length; i++) { - connectState[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); - if (!connectState[i].isSuccess()) { - throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectState[i].getCause()); - } - nodeChannels.state[i] = connectState[i].getChannel(); - nodeChannels.state[i].getCloseFuture().addListener(new ChannelCloseListener(node)); + connectState[i] = clientBootstrap.connect(address); } - for (int i = 0; i < connectPing.length; i++) { - connectPing[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); - if (!connectPing[i].isSuccess()) { - throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout 
+ "]", connectPing[i].getCause()); - } - nodeChannels.ping[i] = connectPing[i].getChannel(); - nodeChannels.ping[i].getCloseFuture().addListener(new ChannelCloseListener(node)); + connectPing[i] = clientBootstrap.connect(address); } - if (nodeChannels.recovery.length == 0) { - if (nodeChannels.bulk.length > 0) { - nodeChannels.recovery = nodeChannels.bulk; - } else { - nodeChannels.recovery = nodeChannels.reg; + try { + for (int i = 0; i < connectRecovery.length; i++) { + connectRecovery[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); + if (!connectRecovery[i].isSuccess()) { + throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectRecovery[i].getCause()); + } + nodeChannels.recovery[i] = connectRecovery[i].getChannel(); + nodeChannels.recovery[i].getCloseFuture().addListener(new ChannelCloseListener(node)); } - } - if (nodeChannels.bulk.length == 0) { - nodeChannels.bulk = nodeChannels.reg; - } - } catch (RuntimeException e) { - // clean the futures - List futures = new ArrayList<>(); - futures.addAll(Arrays.asList(connectRecovery)); - futures.addAll(Arrays.asList(connectBulk)); - futures.addAll(Arrays.asList(connectReg)); - futures.addAll(Arrays.asList(connectState)); - futures.addAll(Arrays.asList(connectPing)); - for (ChannelFuture future : Collections.unmodifiableList(futures)) { - future.cancel(); - if (future.getChannel() != null && future.getChannel().isOpen()) { - try { - future.getChannel().close(); - } catch (Exception e1) { - // ignore + + for (int i = 0; i < connectBulk.length; i++) { + connectBulk[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); + if (!connectBulk[i].isSuccess()) { + throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectBulk[i].getCause()); + } + nodeChannels.bulk[i] = connectBulk[i].getChannel(); + nodeChannels.bulk[i].getCloseFuture().addListener(new ChannelCloseListener(node)); + } + + for (int i = 0; i < connectReg.length; 
i++) { + connectReg[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); + if (!connectReg[i].isSuccess()) { + throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectReg[i].getCause()); + } + nodeChannels.reg[i] = connectReg[i].getChannel(); + nodeChannels.reg[i].getCloseFuture().addListener(new ChannelCloseListener(node)); + } + + for (int i = 0; i < connectState.length; i++) { + connectState[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); + if (!connectState[i].isSuccess()) { + throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectState[i].getCause()); + } + nodeChannels.state[i] = connectState[i].getChannel(); + nodeChannels.state[i].getCloseFuture().addListener(new ChannelCloseListener(node)); + } + + for (int i = 0; i < connectPing.length; i++) { + connectPing[i].awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); + if (!connectPing[i].isSuccess()) { + throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", connectPing[i].getCause()); + } + nodeChannels.ping[i] = connectPing[i].getChannel(); + nodeChannels.ping[i].getCloseFuture().addListener(new ChannelCloseListener(node)); + } + + if (nodeChannels.recovery.length == 0) { + if (nodeChannels.bulk.length > 0) { + nodeChannels.recovery = nodeChannels.bulk; + } else { + nodeChannels.recovery = nodeChannels.reg; } } - } - throw e; - } - } - - @Override - public void disconnectFromNode(DiscoveryNode node) { - - try (Releasable ignored = connectionLock.acquire(node.getId())) { - NodeChannels nodeChannels = connectedNodes.remove(node); - if (nodeChannels != null) { - try { - logger.debug("disconnecting from [{}] due to explicit disconnect call", node); - nodeChannels.close(); - } finally { - logger.trace("disconnected from [{}] due to explicit disconnect call", node); - transportServiceAdapter.raiseNodeDisconnected(node); + if (nodeChannels.bulk.length == 0) { + 
nodeChannels.bulk = nodeChannels.reg; } - } - } - } - - /** - * Disconnects from a node, only if the relevant channel is found to be part of the node channels. - */ - protected boolean disconnectFromNode(DiscoveryNode node, Channel channel, String reason) { - // this might be called multiple times from all the node channels, so do a lightweight - // check outside of the lock - NodeChannels nodeChannels = connectedNodes.get(node); - if (nodeChannels != null && nodeChannels.hasChannel(channel)) { - try (Releasable ignored = connectionLock.acquire(node.getId())) { - nodeChannels = connectedNodes.get(node); - // check again within the connection lock, if its still applicable to remove it - if (nodeChannels != null && nodeChannels.hasChannel(channel)) { - connectedNodes.remove(node); - try { - logger.debug("disconnecting from [{}], {}", node, reason); - nodeChannels.close(); - } finally { - logger.trace("disconnected from [{}], {}", node, reason); - transportServiceAdapter.raiseNodeDisconnected(node); - } - return true; - } - } - } - return false; - } - - /** - * Disconnects from a node if a channel is found as part of that nodes channels. 
- */ - protected void disconnectFromNodeChannel(final Channel channel, final Throwable failure) { - threadPool().generic().execute(new Runnable() { - - @Override - public void run() { - for (DiscoveryNode node : connectedNodes.keySet()) { - if (disconnectFromNode(node, channel, ExceptionsHelper.detailedMessage(failure))) { - // if we managed to find this channel and disconnect from it, then break, no need to check on - // the rest of the nodes - break; + } catch (RuntimeException e) { + // clean the futures + List futures = new ArrayList<>(); + futures.addAll(Arrays.asList(connectRecovery)); + futures.addAll(Arrays.asList(connectBulk)); + futures.addAll(Arrays.asList(connectReg)); + futures.addAll(Arrays.asList(connectState)); + futures.addAll(Arrays.asList(connectPing)); + for (ChannelFuture future : Collections.unmodifiableList(futures)) { + future.cancel(); + if (future.getChannel() != null && future.getChannel().isOpen()) { + try { + future.getChannel().close(); + } catch (Exception e1) { + // ignore + } } } + throw e; + } + success = true; + } finally { + if (success == false) { + nodeChannels.close(); } - }); - } - - protected Channel nodeChannel(DiscoveryNode node, TransportRequestOptions options) throws ConnectTransportException { - NodeChannels nodeChannels = connectedNodes.get(node); - if (nodeChannels == null) { - throw new NodeNotConnectedException(node, "Node not connected"); } - return nodeChannels.channel(options.type()); + return nodeChannels; } public ChannelPipelineFactory configureClientChannelPipelineFactory() { @@ -1212,7 +483,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem } channelPipeline.addLast("size", sizeHeader); // using a dot as a prefix means, this cannot come from any settings parsed - channelPipeline.addLast("dispatcher", new MessageChannelHandler(nettyTransport, nettyTransport.logger, ".client")); + channelPipeline.addLast("dispatcher", new NettyMessageChannelHandler(nettyTransport, ".client")); return 
channelPipeline; } } @@ -1249,7 +520,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem sizeHeader.setMaxCumulationBufferComponents(nettyTransport.maxCompositeBufferComponents); } channelPipeline.addLast("size", sizeHeader); - channelPipeline.addLast("dispatcher", new MessageChannelHandler(nettyTransport, nettyTransport.logger, name)); + channelPipeline.addLast("dispatcher", new NettyMessageChannelHandler(nettyTransport, name)); return channelPipeline; } } @@ -1266,142 +537,169 @@ public class NettyTransport extends AbstractLifecycleComponent implem public void operationComplete(final ChannelFuture future) throws Exception { NodeChannels nodeChannels = connectedNodes.get(node); if (nodeChannels != null && nodeChannels.hasChannel(future.getChannel())) { - threadPool().generic().execute(new Runnable() { - @Override - public void run() { - disconnectFromNode(node, future.getChannel(), "channel closed event"); - } - }); + threadPool.generic().execute(() -> disconnectFromNode(node, future.getChannel(), "channel closed event")); } } } - public static class NodeChannels { - - List allChannels = Collections.emptyList(); - private Channel[] recovery; - private final AtomicInteger recoveryCounter = new AtomicInteger(); - private Channel[] bulk; - private final AtomicInteger bulkCounter = new AtomicInteger(); - private Channel[] reg; - private final AtomicInteger regCounter = new AtomicInteger(); - private Channel[] state; - private final AtomicInteger stateCounter = new AtomicInteger(); - private Channel[] ping; - private final AtomicInteger pingCounter = new AtomicInteger(); - - public NodeChannels(Channel[] recovery, Channel[] bulk, Channel[] reg, Channel[] state, Channel[] ping) { - this.recovery = recovery; - this.bulk = bulk; - this.reg = reg; - this.state = state; - this.ping = ping; - } - - public void start() { - List newAllChannels = new ArrayList<>(); - newAllChannels.addAll(Arrays.asList(recovery)); - 
newAllChannels.addAll(Arrays.asList(bulk)); - newAllChannels.addAll(Arrays.asList(reg)); - newAllChannels.addAll(Arrays.asList(state)); - newAllChannels.addAll(Arrays.asList(ping)); - this.allChannels = Collections.unmodifiableList(newAllChannels); - } - - public boolean hasChannel(Channel channel) { - for (Channel channel1 : allChannels) { - if (channel.equals(channel1)) { - return true; - } - } - return false; - } - - public Channel channel(TransportRequestOptions.Type type) { - if (type == TransportRequestOptions.Type.REG) { - return reg[Math.floorMod(regCounter.incrementAndGet(), reg.length)]; - } else if (type == TransportRequestOptions.Type.STATE) { - return state[Math.floorMod(stateCounter.incrementAndGet(), state.length)]; - } else if (type == TransportRequestOptions.Type.PING) { - return ping[Math.floorMod(pingCounter.incrementAndGet(), ping.length)]; - } else if (type == TransportRequestOptions.Type.BULK) { - return bulk[Math.floorMod(bulkCounter.incrementAndGet(), bulk.length)]; - } else if (type == TransportRequestOptions.Type.RECOVERY) { - return recovery[Math.floorMod(recoveryCounter.incrementAndGet(), recovery.length)]; - } else { - throw new IllegalArgumentException("no type channel for [" + type + "]"); - } - } - - public synchronized void close() { - List futures = new ArrayList<>(); - for (Channel channel : allChannels) { + protected void sendMessage(Channel channel, BytesReference reference, Runnable sendListener, boolean close) { + final ChannelFuture future = channel.write(NettyUtils.toChannelBuffer(reference)); + if (close) { + future.addListener(f -> { try { - if (channel != null && channel.isOpen()) { - futures.add(channel.close()); - } - } catch (Exception e) { - //ignore + sendListener.run(); + } finally { + f.getChannel().close(); } - } - for (ChannelFuture future : futures) { - future.awaitUninterruptibly(); - } + }); + } else { + future.addListener(future1 -> sendListener.run()); } } - class ScheduledPing extends 
AbstractLifecycleRunnable { + @Override + protected void closeChannels(List channels) { + List futures = new ArrayList<>(); - final CounterMetric successfulPings = new CounterMetric(); - final CounterMetric failedPings = new CounterMetric(); - - public ScheduledPing() { - super(lifecycle, logger); - } - - @Override - protected void doRunInLifecycle() throws Exception { - for (Map.Entry entry : connectedNodes.entrySet()) { - DiscoveryNode node = entry.getKey(); - NodeChannels channels = entry.getValue(); - for (Channel channel : channels.allChannels) { - try { - ChannelFuture future = channel.write(NettyHeader.pingHeader()); - future.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - successfulPings.inc(); - } - }); - } catch (Throwable t) { - if (channel.isOpen()) { - logger.debug("[{}] failed to send ping transport message", t, node); - failedPings.inc(); - } else { - logger.trace("[{}] failed to send ping transport message (channel closed)", t, node); - } - } + for (Channel channel : channels) { + try { + if (channel != null && channel.isOpen()) { + futures.add(channel.close()); } + } catch (Exception e) { + logger.trace("failed to close channel", e); } } - - @Override - protected void onAfterInLifecycle() { - threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, this); + for (ChannelFuture future : futures) { + future.awaitUninterruptibly(); } + } - @Override - public void onFailure(Throwable t) { - if (lifecycle.stoppedOrClosed()) { - logger.trace("failed to send ping transport message", t); + @Override + protected InetSocketAddress getLocalAddress(Channel channel) { + return (InetSocketAddress) channel.getLocalAddress(); + } + + @Override + protected Channel bind(String name, InetSocketAddress address) { + return serverBootstraps.get(name).bind(address); + } + + ScheduledPing getPing() { + return scheduledPing; + } + + @Override + protected boolean isOpen(Channel channel) { + 
return channel.isOpen(); + } + + @Override + protected void stopInternal() { + Releasables.close(serverOpenChannels, () ->{ + for (Map.Entry entry : serverBootstraps.entrySet()) { + String name = entry.getKey(); + ServerBootstrap serverBootstrap = entry.getValue(); + try { + serverBootstrap.releaseExternalResources(); + } catch (Throwable t) { + logger.debug("Error closing serverBootstrap for profile [{}]", t, name); + } + } + serverBootstraps.clear(); + if (clientBootstrap != null) { + clientBootstrap.releaseExternalResources(); + clientBootstrap = null; + } + }); + } + + @Override + public Message prepareSend(Version nodeVersion, TransportMessage message, StreamOutput stream, + ReleasableBytesStream writtenBytes) throws IOException { + // it might be nice to somehow generalize this optimization, maybe a smart "paged" bytes output + // that create paged channel buffers, but its tricky to know when to do it (where this option is + // more explicit). + if (message instanceof BytesTransportRequest) { + BytesTransportRequest bRequest = (BytesTransportRequest) message; + assert nodeVersion.equals(bRequest.version()); + bRequest.writeThin(stream); + stream.close(); + ReleasablePagedBytesReference bytes = writtenBytes.bytes(); + ChannelBuffer headerBuffer = NettyUtils.toChannelBuffer(bytes); + ChannelBuffer contentBuffer = NettyUtils.toChannelBuffer(bRequest.bytes()); + ChannelBuffer buffer = ChannelBuffers.wrappedBuffer(NettyUtils.DEFAULT_GATHERING, headerBuffer, contentBuffer); + return new NettyMessage(buffer); } else { - logger.warn("failed to send ping transport message", t); + return super.prepareSend(nodeVersion, message, stream, writtenBytes); } + } + + @Override + public Message prepareSend(Version nodeVersion, BytesReference bytesReference) { + return new NettyMessage(NettyUtils.toChannelBuffer(bytesReference)); + } + + @Override + public boolean canCompress(TransportRequest request) { + return super.canCompress(request) && (!(request instanceof 
BytesTransportRequest)); + } + + private class NettyMessage implements Message { + private final ChannelBuffer buffer; + + public NettyMessage(ChannelBuffer buffer) { + this.buffer = buffer; + } + + public StreamOutput getHeaderOutput() { + return new ChannelBufferStreamOutput(buffer); + } + + public int size() { + return buffer.readableBytes(); + } + + @Override + public void send(Channel channel, Runnable onRequestSent) { + ChannelFuture future = channel.write(buffer); + ChannelFutureListener channelFutureListener = f -> onRequestSent.run(); + future.addListener(channelFutureListener); } } - protected Version getCurrentVersion() { - // this is just for tests to mock stuff like the nodes version - tests can override this internally - return Version.CURRENT; + private final static class ChannelBufferStreamOutput extends StreamOutput { + + private final ChannelBuffer buffer; + private int offset; + + public ChannelBufferStreamOutput(ChannelBuffer buffer) { + this.buffer = buffer; + this.offset = buffer.readerIndex(); + } + + @Override + public void writeByte(byte b) throws IOException { + buffer.setByte(offset++, b); + } + + @Override + public void writeBytes(byte[] b, int offset, int length) throws IOException { + buffer.setBytes(this.offset, b, offset, length); + this.offset += length; + } + + @Override + public void flush() throws IOException { + } + + @Override + public void close() throws IOException { + } + + @Override + public void reset() throws IOException { + throw new UnsupportedOperationException(); + } } } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java deleted file mode 100644 index 0d5666408ea..00000000000 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport.netty; - -import org.elasticsearch.Version; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.netty.NettyUtils; -import org.elasticsearch.common.netty.ReleaseChannelFutureListener; -import org.elasticsearch.transport.RemoteTransportException; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseOptions; -import org.elasticsearch.transport.TransportServiceAdapter; -import org.elasticsearch.transport.support.TransportStatus; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.ChannelFuture; -import org.jboss.netty.channel.ChannelFutureListener; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicBoolean; - -public class 
NettyTransportChannel implements TransportChannel { - - private final NettyTransport transport; - private final TransportServiceAdapter transportServiceAdapter; - private final Version version; - private final String action; - private final Channel channel; - private final long requestId; - private final String profileName; - private final long reservedBytes; - private final AtomicBoolean released = new AtomicBoolean(); - - public NettyTransportChannel(NettyTransport transport, TransportServiceAdapter transportServiceAdapter, String action, Channel channel, - long requestId, Version version, String profileName, long reservedBytes) { - this.transportServiceAdapter = transportServiceAdapter; - this.version = version; - this.transport = transport; - this.action = action; - this.channel = channel; - this.requestId = requestId; - this.profileName = profileName; - this.reservedBytes = reservedBytes; - } - - @Override - public String getProfileName() { - return profileName; - } - - @Override - public String action() { - return this.action; - } - - @Override - public void sendResponse(TransportResponse response) throws IOException { - sendResponse(response, TransportResponseOptions.EMPTY); - } - - @Override - public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException { - release(); - if (transport.compress) { - options = TransportResponseOptions.builder(options).withCompress(transport.compress).build(); - } - - byte status = 0; - status = TransportStatus.setResponse(status); - - ReleasableBytesStreamOutput bStream = null; - boolean addedReleaseListener = false; - try { - bStream = new ReleasableBytesStreamOutput(transport.bigArrays); - bStream.skip(NettyHeader.HEADER_SIZE); - StreamOutput stream = bStream; - if (options.compress()) { - status = TransportStatus.setCompress(status); - stream = CompressorFactory.COMPRESSOR.streamOutput(stream); - } - stream.setVersion(version); - response.writeTo(stream); - stream.close(); - - 
ReleasablePagedBytesReference bytes = bStream.bytes(); - ChannelBuffer buffer = NettyUtils.toChannelBuffer(bytes); - NettyHeader.writeHeader(buffer, requestId, status, version); - ChannelFuture future = channel.write(buffer); - ReleaseChannelFutureListener listener = new ReleaseChannelFutureListener(bytes); - future.addListener(listener); - addedReleaseListener = true; - final TransportResponseOptions finalOptions = options; - ChannelFutureListener onResponseSentListener = - f -> transportServiceAdapter.onResponseSent(requestId, action, response, finalOptions); - future.addListener(onResponseSentListener); - } finally { - if (!addedReleaseListener && bStream != null) { - Releasables.close(bStream.bytes()); - } - } - } - - @Override - public void sendResponse(Throwable error) throws IOException { - release(); - BytesStreamOutput stream = new BytesStreamOutput(); - stream.skip(NettyHeader.HEADER_SIZE); - RemoteTransportException tx = new RemoteTransportException( - transport.nodeName(), transport.wrapAddress(channel.getLocalAddress()), action, error); - stream.writeThrowable(tx); - byte status = 0; - status = TransportStatus.setResponse(status); - status = TransportStatus.setError(status); - - BytesReference bytes = stream.bytes(); - ChannelBuffer buffer = NettyUtils.toChannelBuffer(bytes); - NettyHeader.writeHeader(buffer, requestId, status, version); - ChannelFuture future = channel.write(buffer); - ChannelFutureListener onResponseSentListener = - f -> transportServiceAdapter.onResponseSent(requestId, action, error); - future.addListener(onResponseSentListener); - } - - private void release() { - // attempt to release once atomically - if (released.compareAndSet(false, true) == false) { - throw new IllegalStateException("reserved bytes are already released"); - } - transport.inFlightRequestsBreaker().addWithoutBreaking(-reservedBytes); - } - - @Override - public long getRequestId() { - return requestId; - } - - @Override - public String getChannelType() { - return 
"netty"; - } - - /** - * Returns the underlying netty channel. This method is intended be used for access to netty to get additional - * details when processing the request and may be used by plugins. Responses should be sent using the methods - * defined in this class and not directly on the channel. - * @return underlying netty channel - */ - public Channel getChannel() { - return channel; - } - -} diff --git a/core/src/main/java/org/elasticsearch/common/netty/NettyUtils.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyUtils.java similarity index 90% rename from core/src/main/java/org/elasticsearch/common/netty/NettyUtils.java rename to core/src/main/java/org/elasticsearch/transport/netty/NettyUtils.java index c37ca3ad6fb..f3fdde5e91c 100644 --- a/core/src/main/java/org/elasticsearch/common/netty/NettyUtils.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyUtils.java @@ -16,12 +16,12 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.common.netty; +package org.elasticsearch.transport.netty; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.transport.netty.NettyInternalESLoggerFactory; +import org.elasticsearch.common.logging.Loggers; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.logging.InternalLogger; @@ -93,10 +93,11 @@ public class NettyUtils { } static { - InternalLoggerFactory.setDefaultFactory(new NettyInternalESLoggerFactory() { + InternalLoggerFactory.setDefaultFactory(new InternalLoggerFactory() { @Override public InternalLogger newInstance(String name) { - return super.newInstance(name.replace("org.jboss.netty.", "netty.").replace("org.jboss.netty.", "netty.")); + name = name.replace("org.jboss.netty.", "netty.").replace("org.jboss.netty.", "netty."); + return new NettyInternalESLogger(Loggers.getLogger(name)); } }); @@ -136,6 +137,13 @@ public class NettyUtils { * Wraps the given ChannelBuffer with a BytesReference */ public static BytesReference toBytesReference(ChannelBuffer channelBuffer) { - return new ChannelBufferBytesReference(channelBuffer); + return toBytesReference(channelBuffer, channelBuffer.readableBytes()); + } + + /** + * Wraps the given ChannelBuffer with a BytesReference of a given size + */ + public static BytesReference toBytesReference(ChannelBuffer channelBuffer, int size) { + return new ChannelBufferBytesReference(channelBuffer, size); } } diff --git a/core/src/main/java/org/elasticsearch/common/netty/OpenChannelsHandler.java b/core/src/main/java/org/elasticsearch/transport/netty/OpenChannelsHandler.java similarity index 95% rename from core/src/main/java/org/elasticsearch/common/netty/OpenChannelsHandler.java rename to core/src/main/java/org/elasticsearch/transport/netty/OpenChannelsHandler.java index 324db75dc90..df7cd73a42b 100644 --- 
a/core/src/main/java/org/elasticsearch/common/netty/OpenChannelsHandler.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/OpenChannelsHandler.java @@ -17,8 +17,9 @@ * under the License. */ -package org.elasticsearch.common.netty; +package org.elasticsearch.transport.netty; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -32,13 +33,14 @@ import org.jboss.netty.channel.ChannelState; import org.jboss.netty.channel.ChannelStateEvent; import org.jboss.netty.channel.ChannelUpstreamHandler; +import java.io.Closeable; import java.util.Set; /** * */ @ChannelHandler.Sharable -public class OpenChannelsHandler implements ChannelUpstreamHandler { +public class OpenChannelsHandler implements ChannelUpstreamHandler, Releasable { final Set openChannels = ConcurrentCollections.newConcurrentSet(); final CounterMetric openChannelsMetric = new CounterMetric(); @@ -91,6 +93,7 @@ public class OpenChannelsHandler implements ChannelUpstreamHandler { return totalChannelsMetric.count(); } + @Override public void close() { for (Channel channel : openChannels) { channel.close().awaitUninterruptibly(); diff --git a/core/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java b/core/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java index 9c410e4b912..d098fae6c78 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java @@ -19,107 +19,29 @@ package org.elasticsearch.transport.netty; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.monitor.jvm.JvmInfo; -import org.elasticsearch.rest.RestStatus; +import 
org.elasticsearch.transport.TcpHeader; +import org.elasticsearch.transport.TcpTransport; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelHandlerContext; import org.jboss.netty.handler.codec.frame.FrameDecoder; import org.jboss.netty.handler.codec.frame.TooLongFrameException; -import java.io.IOException; -import java.io.StreamCorruptedException; - /** */ -public class SizeHeaderFrameDecoder extends FrameDecoder { - - private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().bytes() * 0.9); +final class SizeHeaderFrameDecoder extends FrameDecoder { @Override protected Object decode(ChannelHandlerContext ctx, Channel channel, ChannelBuffer buffer) throws Exception { - final int sizeHeaderLength = NettyHeader.MARKER_BYTES_SIZE + NettyHeader.MESSAGE_LENGTH_SIZE; - if (buffer.readableBytes() < sizeHeaderLength) { + try { + boolean continueProcessing = TcpTransport.validateMessageHeader(NettyUtils.toBytesReference(buffer)); + buffer.skipBytes(TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE); + return continueProcessing ? 
buffer : null; + } catch (IllegalArgumentException ex) { + throw new TooLongFrameException(ex.getMessage(), ex); + } catch (IllegalStateException ex) { return null; } - - int readerIndex = buffer.readerIndex(); - if (buffer.getByte(readerIndex) != 'E' || buffer.getByte(readerIndex + 1) != 'S') { - // special handling for what is probably HTTP - if (bufferStartsWith(buffer, readerIndex, "GET ") || - bufferStartsWith(buffer, readerIndex, "POST ") || - bufferStartsWith(buffer, readerIndex, "PUT ") || - bufferStartsWith(buffer, readerIndex, "HEAD ") || - bufferStartsWith(buffer, readerIndex, "DELETE ") || - bufferStartsWith(buffer, readerIndex, "OPTIONS ") || - bufferStartsWith(buffer, readerIndex, "PATCH ") || - bufferStartsWith(buffer, readerIndex, "TRACE ")) { - - throw new HttpOnTransportException("This is not a HTTP port"); - } - - // we have 6 readable bytes, show 4 (should be enough) - throw new StreamCorruptedException("invalid internal transport message format, got (" - + Integer.toHexString(buffer.getByte(readerIndex) & 0xFF) + "," - + Integer.toHexString(buffer.getByte(readerIndex + 1) & 0xFF) + "," - + Integer.toHexString(buffer.getByte(readerIndex + 2) & 0xFF) + "," - + Integer.toHexString(buffer.getByte(readerIndex + 3) & 0xFF) + ")"); - } - - int dataLen = buffer.getInt(buffer.readerIndex() + NettyHeader.MARKER_BYTES_SIZE); - if (dataLen == NettyHeader.PING_DATA_SIZE) { - // discard the messages we read and continue, this is achieved by skipping the bytes - // and returning null - buffer.skipBytes(sizeHeaderLength); - return null; - } - if (dataLen <= 0) { - throw new StreamCorruptedException("invalid data length: " + dataLen); - } - // safety against too large frames being sent - if (dataLen > NINETY_PER_HEAP_SIZE) { - throw new TooLongFrameException("transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded [" - + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); - } - - if (buffer.readableBytes() < dataLen + sizeHeaderLength) { - 
return null; - } - buffer.skipBytes(sizeHeaderLength); - return buffer; } - private boolean bufferStartsWith(ChannelBuffer buffer, int readerIndex, String method) { - char[] chars = method.toCharArray(); - for (int i = 0; i < chars.length; i++) { - if (buffer.getByte(readerIndex + i) != chars[i]) { - return false; - } - } - - return true; - } - - /** - * A helper exception to mark an incoming connection as potentially being HTTP - * so an appropriate error code can be returned - */ - public static class HttpOnTransportException extends ElasticsearchException { - - public HttpOnTransportException(String msg) { - super(msg); - } - - @Override - public RestStatus status() { - return RestStatus.BAD_REQUEST; - } - - public HttpOnTransportException(StreamInput in) throws IOException{ - super(in); - } - } } diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index d10fc890848..c0b5f2e9cf8 100644 --- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -80,6 +80,7 @@ import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.transport.ActionNotFoundTransportException; import org.elasticsearch.transport.ActionTransportException; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.TcpTransport; import java.io.IOException; import java.net.URISyntaxException; @@ -763,7 +764,7 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(122, null); ids.put(123, org.elasticsearch.indices.IndexAlreadyExistsException.class); ids.put(124, org.elasticsearch.script.Script.ScriptParseException.class); - ids.put(125, org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class); + ids.put(125, TcpTransport.HttpOnTransportException.class); ids.put(126, 
org.elasticsearch.index.mapper.MapperParsingException.class); ids.put(127, org.elasticsearch.search.SearchContextException.class); ids.put(128, org.elasticsearch.search.builder.SearchSourceBuilderException.class); diff --git a/core/src/test/java/org/elasticsearch/common/ChannelsTests.java b/core/src/test/java/org/elasticsearch/common/ChannelsTests.java index 5bb9c614b84..4f2bad36d4a 100644 --- a/core/src/test/java/org/elasticsearch/common/ChannelsTests.java +++ b/core/src/test/java/org/elasticsearch/common/ChannelsTests.java @@ -162,20 +162,6 @@ public class ChannelsTests extends ESTestCase { assertTrue("read bytes didn't match written bytes", sourceRef.equals(copyRef)); } - - public void testWriteFromChannel() throws IOException { - int length = randomIntBetween(1, randomBytes.length / 2); - int offset = randomIntBetween(0, randomBytes.length - length); - ByteBuffer byteBuffer = ByteBuffer.wrap(randomBytes); - ChannelBuffer source = new ByteBufferBackedChannelBuffer(byteBuffer); - Channels.writeToChannel(source, offset, length, fileChannel); - - BytesReference copyRef = new BytesArray(Channels.readFromFileChannel(fileChannel, 0, length)); - BytesReference sourceRef = new BytesArray(randomBytes, offset, length); - - assertTrue("read bytes didn't match written bytes", sourceRef.equals(copyRef)); - } - class MockFileChannel extends FileChannel { FileChannel delegate; diff --git a/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java similarity index 99% rename from core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java rename to core/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java index f9451375590..36335ee78d0 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.transport.netty; +package org.elasticsearch.common.util.concurrent; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.util.concurrent.KeyedLock; diff --git a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 4fa99b3b80c..9ae029a4aa4 100644 --- a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -324,16 +324,13 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testVoidMessageCompressed() { serviceA.registerRequestHandler("sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC, - new TransportRequestHandler() { - @Override - public void messageReceived(TransportRequest.Empty request, TransportChannel channel) { - try { - TransportResponseOptions responseOptions = TransportResponseOptions.builder().withCompress(true).build(); - channel.sendResponse(TransportResponse.Empty.INSTANCE, responseOptions); - } catch (IOException e) { - logger.error("Unexpected failure", e); - fail(e.getMessage()); - } + (request, channel) -> { + try { + TransportResponseOptions responseOptions = TransportResponseOptions.builder().withCompress(true).build(); + channel.sendResponse(TransportResponse.Empty.INSTANCE, responseOptions); + } catch (IOException e) { + logger.error("Unexpected failure", e); + fail(e.getMessage()); } }); diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyPublishPortTests.java b/core/src/test/java/org/elasticsearch/transport/PublishPortTests.java similarity index 94% rename from core/src/test/java/org/elasticsearch/transport/netty/NettyPublishPortTests.java rename to core/src/test/java/org/elasticsearch/transport/PublishPortTests.java index 6f602dafc99..ffe7a2d7ce2 100644 --- 
a/core/src/test/java/org/elasticsearch/transport/netty/NettyPublishPortTests.java +++ b/core/src/test/java/org/elasticsearch/transport/PublishPortTests.java @@ -17,13 +17,11 @@ * under the License. */ -package org.elasticsearch.transport.netty; +package org.elasticsearch.transport; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.transport.BindTransportException; -import org.elasticsearch.transport.TransportSettings; import java.net.InetSocketAddress; import java.net.UnknownHostException; @@ -32,11 +30,11 @@ import java.util.List; import static java.net.InetAddress.getByName; import static java.util.Arrays.asList; -import static org.elasticsearch.transport.netty.NettyTransport.resolvePublishPort; +import static org.elasticsearch.transport.TcpTransport.resolvePublishPort; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class NettyPublishPortTests extends ESTestCase { +public class PublishPortTests extends ESTestCase { public void testPublishPort() throws Exception { int boundPort = randomIntBetween(9000, 9100); diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java b/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java similarity index 78% rename from core/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java rename to core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java index a5bd6612cdf..da1dcf43e5d 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java +++ b/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java @@ -17,17 +17,17 @@ * under the License. 
*/ -package org.elasticsearch.transport.netty; +package org.elasticsearch.transport; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESTestCase; -/** Unit tests for NettyTransport */ -public class NettyTransportTests extends ESTestCase { - +/** Unit tests for TCPTransport */ +public class TCPTransportTests extends ESTestCase { + /** Test ipv4 host with a default port works */ public void testParseV4DefaultPort() throws Exception { - TransportAddress[] addresses = NettyTransport.parse("127.0.0.1", "1234", Integer.MAX_VALUE); + TransportAddress[] addresses = TcpTransport.parse("127.0.0.1", "1234", Integer.MAX_VALUE); assertEquals(1, addresses.length); assertEquals("127.0.0.1", addresses[0].getAddress()); @@ -36,19 +36,19 @@ public class NettyTransportTests extends ESTestCase { /** Test ipv4 host with a default port range works */ public void testParseV4DefaultRange() throws Exception { - TransportAddress[] addresses = NettyTransport.parse("127.0.0.1", "1234-1235", Integer.MAX_VALUE); + TransportAddress[] addresses = TcpTransport.parse("127.0.0.1", "1234-1235", Integer.MAX_VALUE); assertEquals(2, addresses.length); assertEquals("127.0.0.1", addresses[0].getAddress()); assertEquals(1234, addresses[0].getPort()); - + assertEquals("127.0.0.1", addresses[1].getAddress()); assertEquals(1235, addresses[1].getPort()); } /** Test ipv4 host with port works */ public void testParseV4WithPort() throws Exception { - TransportAddress[] addresses = NettyTransport.parse("127.0.0.1:2345", "1234", Integer.MAX_VALUE); + TransportAddress[] addresses = TcpTransport.parse("127.0.0.1:2345", "1234", Integer.MAX_VALUE); assertEquals(1, addresses.length); assertEquals("127.0.0.1", addresses[0].getAddress()); @@ -57,7 +57,7 @@ public class NettyTransportTests extends ESTestCase { /** Test ipv4 host with port range works */ public void testParseV4WithPortRange() throws Exception { - TransportAddress[] addresses = 
NettyTransport.parse("127.0.0.1:2345-2346", "1234", Integer.MAX_VALUE); + TransportAddress[] addresses = TcpTransport.parse("127.0.0.1:2345-2346", "1234", Integer.MAX_VALUE); assertEquals(2, addresses.length); assertEquals("127.0.0.1", addresses[0].getAddress()); @@ -70,7 +70,7 @@ public class NettyTransportTests extends ESTestCase { /** Test unbracketed ipv6 hosts in configuration fail. Leave no ambiguity */ public void testParseV6UnBracketed() throws Exception { try { - NettyTransport.parse("::1", "1234", Integer.MAX_VALUE); + TcpTransport.parse("::1", "1234", Integer.MAX_VALUE); fail("should have gotten exception"); } catch (IllegalArgumentException expected) { assertTrue(expected.getMessage().contains("must be bracketed")); @@ -79,7 +79,7 @@ public class NettyTransportTests extends ESTestCase { /** Test ipv6 host with a default port works */ public void testParseV6DefaultPort() throws Exception { - TransportAddress[] addresses = NettyTransport.parse("[::1]", "1234", Integer.MAX_VALUE); + TransportAddress[] addresses = TcpTransport.parse("[::1]", "1234", Integer.MAX_VALUE); assertEquals(1, addresses.length); assertEquals("::1", addresses[0].getAddress()); @@ -88,19 +88,19 @@ public class NettyTransportTests extends ESTestCase { /** Test ipv6 host with a default port range works */ public void testParseV6DefaultRange() throws Exception { - TransportAddress[] addresses = NettyTransport.parse("[::1]", "1234-1235", Integer.MAX_VALUE); + TransportAddress[] addresses = TcpTransport.parse("[::1]", "1234-1235", Integer.MAX_VALUE); assertEquals(2, addresses.length); assertEquals("::1", addresses[0].getAddress()); assertEquals(1234, addresses[0].getPort()); - + assertEquals("::1", addresses[1].getAddress()); assertEquals(1235, addresses[1].getPort()); } /** Test ipv6 host with port works */ public void testParseV6WithPort() throws Exception { - TransportAddress[] addresses = NettyTransport.parse("[::1]:2345", "1234", Integer.MAX_VALUE); + TransportAddress[] addresses = 
TcpTransport.parse("[::1]:2345", "1234", Integer.MAX_VALUE); assertEquals(1, addresses.length); assertEquals("::1", addresses[0].getAddress()); @@ -109,7 +109,7 @@ public class NettyTransportTests extends ESTestCase { /** Test ipv6 host with port range works */ public void testParseV6WithPortRange() throws Exception { - TransportAddress[] addresses = NettyTransport.parse("[::1]:2345-2346", "1234", Integer.MAX_VALUE); + TransportAddress[] addresses = TcpTransport.parse("[::1]:2345-2346", "1234", Integer.MAX_VALUE); assertEquals(2, addresses.length); assertEquals("::1", addresses[0].getAddress()); @@ -118,10 +118,10 @@ public class NettyTransportTests extends ESTestCase { assertEquals("::1", addresses[1].getAddress()); assertEquals(2346, addresses[1].getPort()); } - + /** Test per-address limit */ public void testAddressLimit() throws Exception { - TransportAddress[] addresses = NettyTransport.parse("[::1]:100-200", "1000", 3); + TransportAddress[] addresses = TcpTransport.parse("[::1]:100-200", "1000", 3); assertEquals(3, addresses.length); assertEquals(100, addresses[0].getPort()); assertEquals(101, addresses[1].getPort()); diff --git a/core/src/test/java/org/elasticsearch/common/netty/ChannelBufferBytesReferenceTests.java b/core/src/test/java/org/elasticsearch/transport/netty/ChannelBufferBytesReferenceTests.java similarity index 96% rename from core/src/test/java/org/elasticsearch/common/netty/ChannelBufferBytesReferenceTests.java rename to core/src/test/java/org/elasticsearch/transport/netty/ChannelBufferBytesReferenceTests.java index 76a8626fee5..a284f6ea911 100644 --- a/core/src/test/java/org/elasticsearch/common/netty/ChannelBufferBytesReferenceTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/ChannelBufferBytesReferenceTests.java @@ -16,12 +16,13 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.common.netty; +package org.elasticsearch.transport.netty; import org.elasticsearch.common.bytes.AbstractBytesReferenceTestCase; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.transport.netty.NettyUtils; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffers; diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java index df7dcb0714b..c69f56c2cbd 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport.netty; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; @@ -33,6 +32,7 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; @@ -56,7 +56,7 @@ public class NettyScheduledPingTests extends ESTestCase { ThreadPool threadPool = new TestThreadPool(getClass().getName()); Settings settings = Settings.builder() - .put(NettyTransport.PING_SCHEDULE.getKey(), "5ms") + .put(TcpTransport.PING_SCHEDULE.getKey(), "5ms") .put(TransportSettings.PORT.getKey(), 0) .put("cluster.name", "test") 
.build(); @@ -89,12 +89,12 @@ public class NettyScheduledPingTests extends ESTestCase { assertBusy(new Runnable() { @Override public void run() { - assertThat(nettyA.scheduledPing.successfulPings.count(), greaterThan(100L)); - assertThat(nettyB.scheduledPing.successfulPings.count(), greaterThan(100L)); + assertThat(nettyA.getPing().getSuccessfulPings(), greaterThan(100L)); + assertThat(nettyB.getPing().getSuccessfulPings(), greaterThan(100L)); } }); - assertThat(nettyA.scheduledPing.failedPings.count(), equalTo(0L)); - assertThat(nettyB.scheduledPing.failedPings.count(), equalTo(0L)); + assertThat(nettyA.getPing().getFailedPings(), equalTo(0L)); + assertThat(nettyB.getPing().getFailedPings(), equalTo(0L)); serviceA.registerRequestHandler("sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @@ -137,15 +137,12 @@ public class NettyScheduledPingTests extends ESTestCase { }).txGet(); } - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(nettyA.scheduledPing.successfulPings.count(), greaterThan(200L)); - assertThat(nettyB.scheduledPing.successfulPings.count(), greaterThan(200L)); - } + assertBusy(() -> { + assertThat(nettyA.getPing().getSuccessfulPings(), greaterThan(200L)); + assertThat(nettyB.getPing().getSuccessfulPings(), greaterThan(200L)); }); - assertThat(nettyA.scheduledPing.failedPings.count(), equalTo(0L)); - assertThat(nettyB.scheduledPing.failedPings.count(), equalTo(0L)); + assertThat(nettyA.getPing().getFailedPings(), equalTo(0L)); + assertThat(nettyB.getPing().getFailedPings(), equalTo(0L)); Releasables.close(serviceA, serviceB); terminate(threadPool); diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java index 2f89435c6df..310f804ef7c 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java +++ 
b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java @@ -44,6 +44,7 @@ import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.ChannelPipelineFactory; import java.io.IOException; +import java.net.InetSocketAddress; import java.util.Collection; import java.util.Collections; @@ -98,45 +99,24 @@ public class NettyTransportIT extends ESIntegTestCase { super(settings, threadPool, networkService, bigArrays, namedWriteableRegistry, circuitBreakerService); } + protected String handleRequest(Channel channel, String profileName, + StreamInput stream, long requestId, int messageLengthBytes, Version version, + InetSocketAddress remoteAddress) throws IOException { + String action = super.handleRequest(channel, profileName, stream, requestId, messageLengthBytes, version, + remoteAddress); + channelProfileName = TransportSettings.DEFAULT_PROFILE; + return action; + } + @Override - public ChannelPipelineFactory configureServerChannelPipelineFactory(String name, Settings groupSettings) { - return new ErrorPipelineFactory(this, name, groupSettings); - } - - private static class ErrorPipelineFactory extends ServerChannelPipelineFactory { - - private final ESLogger logger; - - public ErrorPipelineFactory(ExceptionThrowingNettyTransport nettyTransport, String name, Settings groupSettings) { - super(nettyTransport, name, groupSettings); - this.logger = nettyTransport.logger; - } - - @Override - public ChannelPipeline getPipeline() throws Exception { - ChannelPipeline pipeline = super.getPipeline(); - pipeline.replace("dispatcher", "dispatcher", - new MessageChannelHandler(nettyTransport, logger, TransportSettings.DEFAULT_PROFILE) { - - @Override - protected String handleRequest(Channel channel, Marker marker, StreamInput buffer, long requestId, - int messageLengthBytes, Version version) throws IOException { - String action = super.handleRequest(channel, marker, buffer, requestId, messageLengthBytes, version); - channelProfileName = 
this.profileName; - return action; - } - - @Override - protected void validateRequest(Marker marker, StreamInput buffer, long requestId, String action) throws IOException { - super.validateRequest(marker, buffer, requestId, action); - String error = threadPool.getThreadContext().getHeader("ERROR"); - if (error != null) { - throw new ElasticsearchException(error); - } - } - }); - return pipeline; + protected void validateRequest(StreamInput buffer, long requestId, String action) + throws IOException { + super.validateRequest(buffer, requestId, action); + String error = threadPool.getThreadContext().getHeader("ERROR"); + if (error != null) { + throw new ElasticsearchException(error); } } + } } diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java index 6fdc214d18d..352c90d2317 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.transport.netty; -import org.elasticsearch.Version; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -30,6 +29,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; import org.junit.Before; @@ -58,7 +58,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { .build(); ThreadPool threadPool = new TestThreadPool("tst"); - try (NettyTransport transport = 
startNettyTransport(settings, threadPool)) { + try (TcpTransport transport = startTransport(settings, threadPool)) { assertEquals(1, transport.profileBoundAddresses().size()); assertEquals(1, transport.boundAddress().boundAddresses().length); } finally { @@ -74,7 +74,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { .build(); ThreadPool threadPool = new TestThreadPool("tst"); - try (NettyTransport transport = startNettyTransport(settings, threadPool)) { + try (TcpTransport transport = startTransport(settings, threadPool)) { assertEquals(1, transport.profileBoundAddresses().size()); assertEquals(1, transport.boundAddress().boundAddresses().length); } finally { @@ -91,7 +91,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { .build(); ThreadPool threadPool = new TestThreadPool("tst"); - try (NettyTransport transport = startNettyTransport(settings, threadPool)) { + try (TcpTransport transport = startTransport(settings, threadPool)) { assertEquals(0, transport.profileBoundAddresses().size()); assertEquals(1, transport.boundAddress().boundAddresses().length); } finally { @@ -107,7 +107,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { .build(); ThreadPool threadPool = new TestThreadPool("tst"); - try (NettyTransport transport = startNettyTransport(settings, threadPool)) { + try (TcpTransport transport = startTransport(settings, threadPool)) { assertEquals(0, transport.profileBoundAddresses().size()); assertEquals(1, transport.boundAddress().boundAddresses().length); } finally { @@ -125,7 +125,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { .build(); ThreadPool threadPool = new TestThreadPool("tst"); - try (NettyTransport transport = startNettyTransport(settings, threadPool)) { + try (TcpTransport transport = startTransport(settings, threadPool)) { assertEquals(0, transport.profileBoundAddresses().size()); assertEquals(1, transport.boundAddress().boundAddresses().length); } finally { @@ -133,14 
+133,13 @@ public class NettyTransportMultiPortTests extends ESTestCase { } } - private NettyTransport startNettyTransport(Settings settings, ThreadPool threadPool) { + private TcpTransport startTransport(Settings settings, ThreadPool threadPool) { BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()); - - NettyTransport nettyTransport = new NettyTransport(settings, threadPool, new NetworkService(settings), bigArrays, + TcpTransport transport = new NettyTransport(settings, threadPool, new NetworkService(settings), bigArrays, new NamedWriteableRegistry(), new NoneCircuitBreakerService()); - nettyTransport.start(); + transport.start(); - assertThat(nettyTransport.lifecycleState(), is(Lifecycle.State.STARTED)); - return nettyTransport; + assertThat(transport.lifecycleState(), is(Lifecycle.State.STARTED)); + return transport; } } diff --git a/core/src/test/java/org/elasticsearch/common/netty/NettyUtilsTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyUtilsTests.java similarity index 98% rename from core/src/test/java/org/elasticsearch/common/netty/NettyUtilsTests.java rename to core/src/test/java/org/elasticsearch/transport/netty/NettyUtilsTests.java index 2d981dc9eae..fa8f30249bb 100644 --- a/core/src/test/java/org/elasticsearch/common/netty/NettyUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyUtilsTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.common.netty; +package org.elasticsearch.transport.netty; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -27,7 +27,6 @@ import org.elasticsearch.test.ESTestCase; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.buffer.CompositeChannelBuffer; -import org.junit.Before; import java.io.IOException; diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 20af5cf50b2..b6b75b1ec64 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -89,6 +89,7 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.transport.AssertingLocalTransport; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; @@ -421,9 +422,9 @@ public final class InternalTestCluster extends TestCluster { // randomize netty settings if (random.nextBoolean()) { builder.put(NettyTransport.WORKER_COUNT.getKey(), random.nextInt(3) + 1); - builder.put(NettyTransport.CONNECTIONS_PER_NODE_RECOVERY.getKey(), random.nextInt(2) + 1); - builder.put(NettyTransport.CONNECTIONS_PER_NODE_BULK.getKey(), random.nextInt(3) + 1); - builder.put(NettyTransport.CONNECTIONS_PER_NODE_REG.getKey(), random.nextInt(6) + 1); + builder.put(TcpTransport.CONNECTIONS_PER_NODE_RECOVERY.getKey(), random.nextInt(2) + 1); + builder.put(TcpTransport.CONNECTIONS_PER_NODE_BULK.getKey(), random.nextInt(3) + 1); + 
builder.put(TcpTransport.CONNECTIONS_PER_NODE_REG.getKey(), random.nextInt(6) + 1); } if (random.nextBoolean()) { @@ -455,7 +456,7 @@ public final class InternalTestCluster extends TestCluster { } if (random.nextBoolean()) { - builder.put(NettyTransport.PING_SCHEDULE.getKey(), RandomInts.randomIntBetween(random, 100, 2000) + "ms"); + builder.put(TcpTransport.PING_SCHEDULE.getKey(), RandomInts.randomIntBetween(random, 100, 2000) + "ms"); } if (random.nextBoolean()) { From 8c6c00ff1526b6ca21935fb491174e99c1558efc Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 13:57:43 +0200 Subject: [PATCH 11/36] Update documentation for cat/plugins API Cat API for plugins doesn't display anymore url or jvm/site flag --- docs/reference/cat/plugins.asciidoc | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc index 81df5cfb127..0af1faa5c9c 100644 --- a/docs/reference/cat/plugins.asciidoc +++ b/docs/reference/cat/plugins.asciidoc @@ -6,14 +6,9 @@ The `plugins` command provides a view per node of running plugins. This informat [source,sh] ------------------------------------------------------------------------------ % curl 'localhost:9200/_cat/plugins?v' -name component version type isolation url -Abraxas discovery-azure 2.1.0-SNAPSHOT j x -Abraxas lang-javascript 2.0.0-SNAPSHOT j x -Abraxas marvel NA j/s x /_plugin/marvel/ -Abraxas lang-python 2.0.0-SNAPSHOT j x -Abraxas inquisitor NA s /_plugin/inquisitor/ -Abraxas kopf 0.5.2 s /_plugin/kopf/ -Abraxas segmentspy NA s /_plugin/segmentspy/ +name component version description +Abraxas discovery-gce 5.0.0 The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism. +Abraxas lang-javascript 5.0.0 The JavaScript language plugin allows to have javascript as the language of scripts to execute. 
------------------------------------------------------------------------------- We can tell quickly how many plugins per node we have and which versions. From 2dee980a1a864bd3346099e6e00289709e7e45d5 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 14:05:29 +0200 Subject: [PATCH 12/36] LICENSE.txt is not needed in plugin root dir We have licenses in licenses dir and the global license for the whole project is in the root dir so this file is not needed here. --- plugins/discovery-azure/LICENSE.txt | 202 ---------------------------- 1 file changed, 202 deletions(-) delete mode 100644 plugins/discovery-azure/LICENSE.txt diff --git a/plugins/discovery-azure/LICENSE.txt b/plugins/discovery-azure/LICENSE.txt deleted file mode 100644 index d6456956733..00000000000 --- a/plugins/discovery-azure/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. From 74d5fb3197a968e1dde17e6f529ad85602bc3ca2 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 14:06:31 +0200 Subject: [PATCH 13/36] LICENSE.txt is not needed in plugin root dir We have licenses in licenses dir and the global license for the whole project is in the root dir so this file is not needed here. --- plugins/store-smb/LICENSE.txt | 202 ---------------------------------- 1 file changed, 202 deletions(-) delete mode 100644 plugins/store-smb/LICENSE.txt diff --git a/plugins/store-smb/LICENSE.txt b/plugins/store-smb/LICENSE.txt deleted file mode 100644 index d6456956733..00000000000 --- a/plugins/store-smb/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. From cd6535ea9b7803e3210b20cef4d7cb0357f2bbbe Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 14:07:01 +0200 Subject: [PATCH 14/36] LICENSE.txt is not needed in plugin root dir We have licenses in licenses dir and the global license for the whole project is in the root dir so this file is not needed here. --- plugins/repository-azure/LICENSE.txt | 202 --------------------------- 1 file changed, 202 deletions(-) delete mode 100644 plugins/repository-azure/LICENSE.txt diff --git a/plugins/repository-azure/LICENSE.txt b/plugins/repository-azure/LICENSE.txt deleted file mode 100644 index d6456956733..00000000000 --- a/plugins/repository-azure/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. From 527a9c7f48d571d06e5e300e64681ad5d952f398 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 14:39:23 +0200 Subject: [PATCH 15/36] Deprecate discovery-azure and rename it to discovery-azure-classic As discussed at https://github.com/elastic/elasticsearch-cloud-azure/issues/91#issuecomment-229113595, we know that the current `discovery-azure` plugin only works with Azure Classic VMs / Services (which is somehow Legacy now). The proposal here is to rename `discovery-azure` to `discovery-azure-classic` in case some users are using it. And deprecate it for 5.0. Closes #19144. 
--- .../resources/checkstyle_suppressions.xml | 8 ++--- dev-tools/smoke_test_rc.py | 2 +- docs/plugins/discovery-azure.asciidoc | 32 +++++++++---------- docs/plugins/discovery.asciidoc | 6 ++-- docs/plugins/redirects.asciidoc | 2 +- .../migration/migrate_5_0/plugins.asciidoc | 2 +- .../modules/discovery/azure.asciidoc | 8 ++--- .../LICENSE.txt | 0 .../build.gradle | 2 +- .../licenses/azure-LICENSE.txt | 0 .../licenses/azure-NOTICE.txt | 0 .../licenses/azure-core-0.9.3.jar.sha1 | 0 .../azure-svc-mgmt-compute-0.9.3.jar.sha1 | 0 .../licenses/commons-codec-1.10.jar.sha1 | 0 .../licenses/commons-codec-LICENSE.txt | 0 .../licenses/commons-codec-NOTICE.txt | 0 .../licenses/commons-io-2.4.jar.sha1 | 0 .../licenses/commons-io-LICENSE.txt | 0 .../licenses/commons-io-NOTICE.txt | 0 .../licenses/commons-lang-2.6.jar.sha1 | 0 .../licenses/commons-lang-LICENSE.txt | 0 .../licenses/commons-lang-NOTICE.txt | 0 .../licenses/commons-logging-1.1.3.jar.sha1 | 0 .../licenses/commons-logging-LICENSE.txt | 0 .../licenses/commons-logging-NOTICE.txt | 0 .../licenses/httpclient-4.5.2.jar.sha1 | 0 .../licenses/httpclient-LICENSE.txt | 0 .../licenses/httpclient-NOTICE.txt | 0 .../licenses/httpcore-4.4.4.jar.sha1 | 0 .../licenses/httpcore-LICENSE.txt | 0 .../licenses/httpcore-NOTICE.txt | 0 .../licenses/jackson-LICENSE | 0 .../licenses/jackson-NOTICE | 0 .../licenses/jackson-core-asl-1.9.2.jar.sha1 | 0 .../licenses/jackson-jaxrs-1.9.2.jar.sha1 | 0 .../jackson-mapper-asl-1.9.2.jar.sha1 | 0 .../licenses/jackson-xc-1.9.2.jar.sha1 | 0 .../licenses/javax.inject-1.jar.sha1 | 0 .../licenses/javax.inject-LICENSE.txt | 0 .../licenses/javax.inject-NOTICE.txt | 0 .../licenses/jaxb-LICENSE.txt | 0 .../licenses/jaxb-NOTICE.txt | 0 .../licenses/jaxb-api-2.2.2.jar.sha1 | 0 .../licenses/jaxb-impl-2.2.3-1.jar.sha1 | 0 .../licenses/jersey-LICENSE.txt | 0 .../licenses/jersey-NOTICE.txt | 0 .../licenses/jersey-client-1.13.jar.sha1 | 0 .../licenses/jersey-core-1.13.jar.sha1 | 0 
.../licenses/jersey-json-1.13.jar.sha1 | 0 .../licenses/jettison-1.1.jar.sha1 | 0 .../licenses/jettison-LICENSE.txt | 0 .../licenses/jettison-NOTICE.txt | 0 .../licenses/mail-1.4.5.jar.sha1 | 0 .../licenses/mail-LICENSE.txt | 0 .../licenses/mail-NOTICE.txt | 0 .../azure/classic}/AzureDiscoveryModule.java | 27 ++++++++-------- .../AzureServiceDisableException.java | 2 +- .../classic}/AzureServiceRemoteException.java | 2 +- .../management/AzureComputeService.java | 6 ++-- .../management/AzureComputeServiceImpl.java | 4 +-- .../classic}/AzureUnicastHostsProvider.java | 10 +++--- .../azure/classic}/AzureDiscoveryPlugin.java | 14 ++++---- .../AbstractAzureComputeServiceTestCase.java | 8 ++--- .../AzureComputeServiceSimpleMock.java | 4 +-- .../AzureComputeServiceTwoNodesMock.java | 4 +-- .../AzureComputeServiceAbstractMock.java | 2 +- .../AzureDiscoveryClusterFormationTests.java | 6 ++-- .../azure/classic}/AzureDiscoveryRestIT.java | 2 +- .../AzureMinimumMasterNodesTests.java | 6 ++-- .../azure/classic}/AzureSimpleTests.java | 10 +++--- .../classic}/AzureTwoStartedNodesTests.java | 10 +++--- .../discovery_azure_classic/10_basic.yaml | 13 ++++++++ .../test/discovery_azure/10_basic.yaml | 13 -------- .../scripts/module_and_plugin_test_cases.bash | 6 ++-- settings.gradle | 2 +- 75 files changed, 107 insertions(+), 106 deletions(-) rename plugins/{discovery-azure => discovery-azure-classic}/LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/build.gradle (99%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/azure-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/azure-NOTICE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/azure-core-0.9.3.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 (100%) rename plugins/{discovery-azure => 
discovery-azure-classic}/licenses/commons-codec-1.10.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/commons-codec-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/commons-codec-NOTICE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/commons-io-2.4.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/commons-io-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/commons-io-NOTICE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/commons-lang-2.6.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/commons-lang-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/commons-lang-NOTICE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/commons-logging-1.1.3.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/commons-logging-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/commons-logging-NOTICE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/httpclient-4.5.2.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/httpclient-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/httpclient-NOTICE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/httpcore-4.4.4.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/httpcore-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/httpcore-NOTICE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jackson-LICENSE (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jackson-NOTICE (100%) rename plugins/{discovery-azure => 
discovery-azure-classic}/licenses/jackson-core-asl-1.9.2.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jackson-jaxrs-1.9.2.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jackson-mapper-asl-1.9.2.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jackson-xc-1.9.2.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/javax.inject-1.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/javax.inject-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/javax.inject-NOTICE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jaxb-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jaxb-NOTICE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jaxb-api-2.2.2.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jaxb-impl-2.2.3-1.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jersey-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jersey-NOTICE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jersey-client-1.13.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jersey-core-1.13.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jersey-json-1.13.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jettison-1.1.jar.sha1 (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jettison-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/jettison-NOTICE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/mail-1.4.5.jar.sha1 (100%) rename plugins/{discovery-azure => 
discovery-azure-classic}/licenses/mail-LICENSE.txt (100%) rename plugins/{discovery-azure => discovery-azure-classic}/licenses/mail-NOTICE.txt (100%) rename plugins/{discovery-azure/src/main/java/org/elasticsearch/cloud/azure => discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic}/AzureDiscoveryModule.java (76%) rename plugins/{discovery-azure/src/main/java/org/elasticsearch/cloud/azure => discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic}/AzureServiceDisableException.java (95%) rename plugins/{discovery-azure/src/main/java/org/elasticsearch/cloud/azure => discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic}/AzureServiceRemoteException.java (95%) rename plugins/{discovery-azure/src/main/java/org/elasticsearch/cloud/azure => discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic}/management/AzureComputeService.java (94%) rename plugins/{discovery-azure/src/main/java/org/elasticsearch/cloud/azure => discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic}/management/AzureComputeServiceImpl.java (97%) rename plugins/{discovery-azure/src/main/java/org/elasticsearch/discovery/azure => discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic}/AzureUnicastHostsProvider.java (96%) rename plugins/{discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure => discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic}/AzureDiscoveryPlugin.java (82%) rename plugins/{discovery-azure/src/test/java/org/elasticsearch/cloud/azure => discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic}/AbstractAzureComputeServiceTestCase.java (89%) rename plugins/{discovery-azure/src/test/java/org/elasticsearch/cloud/azure => discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic}/AzureComputeServiceSimpleMock.java (95%) rename 
plugins/{discovery-azure/src/test/java/org/elasticsearch/cloud/azure => discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic}/AzureComputeServiceTwoNodesMock.java (96%) rename plugins/{discovery-azure/src/test/java/org/elasticsearch/cloud/azure => discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic}/management/AzureComputeServiceAbstractMock.java (96%) rename plugins/{discovery-azure/src/test/java/org/elasticsearch/discovery/azure => discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic}/AzureDiscoveryClusterFormationTests.java (98%) rename plugins/{discovery-azure/src/test/java/org/elasticsearch/discovery/azure => discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic}/AzureDiscoveryRestIT.java (96%) rename plugins/{discovery-azure/src/test/java/org/elasticsearch/discovery/azure => discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic}/AzureMinimumMasterNodesTests.java (94%) rename plugins/{discovery-azure/src/test/java/org/elasticsearch/discovery/azure => discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic}/AzureSimpleTests.java (89%) rename plugins/{discovery-azure/src/test/java/org/elasticsearch/discovery/azure => discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic}/AzureTwoStartedNodesTests.java (89%) create mode 100644 plugins/discovery-azure-classic/src/test/resources/rest-api-spec/test/discovery_azure_classic/10_basic.yaml delete mode 100644 plugins/discovery-azure/src/test/resources/rest-api-spec/test/discovery_azure/10_basic.yaml diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 5a08a834c3c..94f962be19d 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -1180,10 +1180,10 @@ - - - - + + + + diff --git 
a/dev-tools/smoke_test_rc.py b/dev-tools/smoke_test_rc.py index 259081ddc10..33abbf96345 100644 --- a/dev-tools/smoke_test_rc.py +++ b/dev-tools/smoke_test_rc.py @@ -63,7 +63,7 @@ DEFAULT_PLUGINS = ["analysis-icu", "analysis-phonetic", "analysis-smartcn", "analysis-stempel", - "discovery-azure", + "discovery-azure-classic", "discovery-ec2", "discovery-gce", "ingest-attachment", diff --git a/docs/plugins/discovery-azure.asciidoc b/docs/plugins/discovery-azure.asciidoc index f49f1568cab..022c453e148 100644 --- a/docs/plugins/discovery-azure.asciidoc +++ b/docs/plugins/discovery-azure.asciidoc @@ -1,9 +1,9 @@ -[[discovery-azure]] +[[discovery-azure-classic]] === Azure Discovery Plugin -The Azure Discovery plugin uses the Azure API for unicast discovery. +The Azure Classic Discovery plugin uses the Azure Classic API for unicast discovery. -[[discovery-azure-install]] +[[discovery-azure-classic-install]] [float] ==== Installation @@ -11,13 +11,13 @@ This plugin can be installed using the plugin manager: [source,sh] ---------------------------------------------------------------- -sudo bin/elasticsearch-plugin install discovery-azure +sudo bin/elasticsearch-plugin install discovery-azure-classic ---------------------------------------------------------------- The plugin must be installed on every node in the cluster, and each node must be restarted after installation. -[[discovery-azure-remove]] +[[discovery-azure-classic-remove]] [float] ==== Removal @@ -25,12 +25,12 @@ The plugin can be removed with the following command: [source,sh] ---------------------------------------------------------------- -sudo bin/elasticsearch-plugin remove discovery-azure +sudo bin/elasticsearch-plugin remove discovery-azure-classic ---------------------------------------------------------------- The node must be stopped before removing the plugin. 
-[[discovery-azure-usage]] +[[discovery-azure-classic-usage]] ==== Azure Virtual Machine Discovery Azure VM discovery allows to use the azure APIs to perform automatic discovery (similar to multicast in non hostile @@ -64,7 +64,7 @@ You can use {ref}/modules-network.html[core network host settings]. For example ============================================== -[[discovery-azure-short]] +[[discovery-azure-classic-short]] ===== How to start (short story) * Create Azure instances @@ -73,7 +73,7 @@ You can use {ref}/modules-network.html[core network host settings]. For example * Modify `elasticsearch.yml` file * Start Elasticsearch -[[discovery-azure-settings]] +[[discovery-azure-classic-settings]] ===== Azure credential API settings The following are a list of settings that can further control the credential API: @@ -100,7 +100,7 @@ The following are a list of settings that can further control the credential API your_azure_cloud_service_name -[[discovery-azure-settings-advanced]] +[[discovery-azure-classic-settings-advanced]] ===== Advanced settings The following are a list of settings that can further control the discovery: @@ -143,7 +143,7 @@ discovery: slot: production ---- -[[discovery-azure-long]] +[[discovery-azure-classic-long]] ==== Setup process for Azure Discovery We will expose here one strategy which is to hide our Elasticsearch cluster from outside. @@ -153,7 +153,7 @@ other. That means that with this mode, you can use elasticsearch unicast discovery to build a cluster, using the Azure API to retrieve information about your nodes. -[[discovery-azure-long-prerequisites]] +[[discovery-azure-classic-long-prerequisites]] ===== Prerequisites Before starting, you need to have: @@ -243,7 +243,7 @@ azure account download azure account import /tmp/azure.publishsettings ---- -[[discovery-azure-long-instance]] +[[discovery-azure-classic-long-instance]] ===== Creating your first instance You need to have a storage account available. 
Check http://www.windowsazure.com/en-us/develop/net/how-to-guides/blob-storage/#create-account[Azure Blob Storage documentation] @@ -396,7 +396,7 @@ This command should give you a JSON result: } ---- -[[discovery-azure-long-plugin]] +[[discovery-azure-classic-long-plugin]] ===== Install elasticsearch cloud azure plugin [source,sh] @@ -405,7 +405,7 @@ This command should give you a JSON result: sudo service elasticsearch stop # Install the plugin -sudo /usr/share/elasticsearch/bin/elasticsearch-plugin install discovery-azure +sudo /usr/share/elasticsearch/bin/elasticsearch-plugin install discovery-azure-classic # Configure it sudo vi /etc/elasticsearch/elasticsearch.yml @@ -441,7 +441,7 @@ sudo service elasticsearch start If anything goes wrong, check your logs in `/var/log/elasticsearch`. -[[discovery-azure-scale]] +[[discovery-azure-classic-scale]] ==== Scaling Out! You need first to create an image of your previous machine. diff --git a/docs/plugins/discovery.asciidoc b/docs/plugins/discovery.asciidoc index 62c5b4551ac..ed9cfc0ed3d 100644 --- a/docs/plugins/discovery.asciidoc +++ b/docs/plugins/discovery.asciidoc @@ -13,9 +13,9 @@ The core discovery plugins are: The EC2 discovery plugin uses the https://github.com/aws/aws-sdk-java[AWS API] for unicast discovery. -<>:: +<>:: -The Azure discovery plugin uses the Azure API for unicast discovery. +The Azure Classic discovery plugin uses the Azure Classic API for unicast discovery. 
<>:: @@ -33,7 +33,7 @@ A number of discovery plugins have been contributed by our community: include::discovery-ec2.asciidoc[] -include::discovery-azure.asciidoc[] +include::discovery-azure-classic.asciidoc[] include::discovery-gce.asciidoc[] diff --git a/docs/plugins/redirects.asciidoc b/docs/plugins/redirects.asciidoc index c8cf10c6319..0f9c0b40f2c 100644 --- a/docs/plugins/redirects.asciidoc +++ b/docs/plugins/redirects.asciidoc @@ -24,7 +24,7 @@ The `cloud-aws` plugin has been split into two separate plugins: The `cloud-azure` plugin has been split into two separate plugins: -* <> (`discovery-azure`) +* <> (`discovery-azure-classic`) * <> (`repository-azure`) diff --git a/docs/reference/migration/migrate_5_0/plugins.asciidoc b/docs/reference/migration/migrate_5_0/plugins.asciidoc index 2826c822d15..e1ff497a8f3 100644 --- a/docs/reference/migration/migrate_5_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_5_0/plugins.asciidoc @@ -63,7 +63,7 @@ Proxy settings for both plugins have been renamed: Cloud Azure plugin has been split in three plugins: -* {plugins}/discovery-azure.html[Discovery Azure plugin] +* {plugins}/discovery-azure-classic.html[Discovery Azure plugin] * {plugins}/repository-azure.html[Repository Azure plugin] * {plugins}/store-smb.html[Store SMB plugin] diff --git a/docs/reference/modules/discovery/azure.asciidoc b/docs/reference/modules/discovery/azure.asciidoc index 87d072564b3..1343819b02a 100644 --- a/docs/reference/modules/discovery/azure.asciidoc +++ b/docs/reference/modules/discovery/azure.asciidoc @@ -1,5 +1,5 @@ -[[modules-discovery-azure]] -=== Azure Discovery +[[modules-discovery-azure-classic]] +=== Azure Classic Discovery -Azure discovery allows to use the Azure APIs to perform automatic discovery (similar to multicast). -It is available as a plugin. See {plugins}/discovery-azure.html[discovery-azure] for more information. 
+Azure classic discovery allows to use the Azure Classic APIs to perform automatic discovery (similar to multicast). +It is available as a plugin. See {plugins}/discovery-azure-classic.html[discovery-azure-classic] for more information. diff --git a/plugins/discovery-azure/LICENSE.txt b/plugins/discovery-azure-classic/LICENSE.txt similarity index 100% rename from plugins/discovery-azure/LICENSE.txt rename to plugins/discovery-azure-classic/LICENSE.txt diff --git a/plugins/discovery-azure/build.gradle b/plugins/discovery-azure-classic/build.gradle similarity index 99% rename from plugins/discovery-azure/build.gradle rename to plugins/discovery-azure-classic/build.gradle index 8f0c641e150..42f6fd29106 100644 --- a/plugins/discovery-azure/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -21,7 +21,7 @@ import org.elasticsearch.gradle.LoggedExec esplugin { description 'The Azure Discovery plugin allows to use Azure API for the unicast discovery mechanism' - classname 'org.elasticsearch.plugin.discovery.azure.AzureDiscoveryPlugin' + classname 'org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin' } versions << [ diff --git a/plugins/discovery-azure/licenses/azure-LICENSE.txt b/plugins/discovery-azure-classic/licenses/azure-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/azure-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/azure-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/azure-NOTICE.txt b/plugins/discovery-azure-classic/licenses/azure-NOTICE.txt similarity index 100% rename from plugins/discovery-azure/licenses/azure-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/azure-NOTICE.txt diff --git a/plugins/discovery-azure/licenses/azure-core-0.9.3.jar.sha1 b/plugins/discovery-azure-classic/licenses/azure-core-0.9.3.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/azure-core-0.9.3.jar.sha1 rename to plugins/discovery-azure-classic/licenses/azure-core-0.9.3.jar.sha1 diff --git
a/plugins/discovery-azure/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 b/plugins/discovery-azure-classic/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 rename to plugins/discovery-azure-classic/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 diff --git a/plugins/discovery-azure/licenses/commons-codec-1.10.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.10.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/commons-codec-1.10.jar.sha1 rename to plugins/discovery-azure-classic/licenses/commons-codec-1.10.jar.sha1 diff --git a/plugins/discovery-azure/licenses/commons-codec-LICENSE.txt b/plugins/discovery-azure-classic/licenses/commons-codec-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/commons-codec-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/commons-codec-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/commons-codec-NOTICE.txt b/plugins/discovery-azure-classic/licenses/commons-codec-NOTICE.txt similarity index 100% rename from plugins/discovery-azure/licenses/commons-codec-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/commons-codec-NOTICE.txt diff --git a/plugins/discovery-azure/licenses/commons-io-2.4.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.4.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/commons-io-2.4.jar.sha1 rename to plugins/discovery-azure-classic/licenses/commons-io-2.4.jar.sha1 diff --git a/plugins/discovery-azure/licenses/commons-io-LICENSE.txt b/plugins/discovery-azure-classic/licenses/commons-io-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/commons-io-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/commons-io-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/commons-io-NOTICE.txt 
b/plugins/discovery-azure-classic/licenses/commons-io-NOTICE.txt similarity index 100% rename from plugins/discovery-azure/licenses/commons-io-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/commons-io-NOTICE.txt diff --git a/plugins/discovery-azure/licenses/commons-lang-2.6.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-lang-2.6.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/commons-lang-2.6.jar.sha1 rename to plugins/discovery-azure-classic/licenses/commons-lang-2.6.jar.sha1 diff --git a/plugins/discovery-azure/licenses/commons-lang-LICENSE.txt b/plugins/discovery-azure-classic/licenses/commons-lang-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/commons-lang-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/commons-lang-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/commons-lang-NOTICE.txt b/plugins/discovery-azure-classic/licenses/commons-lang-NOTICE.txt similarity index 100% rename from plugins/discovery-azure/licenses/commons-lang-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/commons-lang-NOTICE.txt diff --git a/plugins/discovery-azure/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-logging-1.1.3.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/commons-logging-1.1.3.jar.sha1 rename to plugins/discovery-azure-classic/licenses/commons-logging-1.1.3.jar.sha1 diff --git a/plugins/discovery-azure/licenses/commons-logging-LICENSE.txt b/plugins/discovery-azure-classic/licenses/commons-logging-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/commons-logging-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/commons-logging-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/commons-logging-NOTICE.txt b/plugins/discovery-azure-classic/licenses/commons-logging-NOTICE.txt similarity index 100% rename from 
plugins/discovery-azure/licenses/commons-logging-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/commons-logging-NOTICE.txt diff --git a/plugins/discovery-azure/licenses/httpclient-4.5.2.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpclient-4.5.2.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/httpclient-4.5.2.jar.sha1 rename to plugins/discovery-azure-classic/licenses/httpclient-4.5.2.jar.sha1 diff --git a/plugins/discovery-azure/licenses/httpclient-LICENSE.txt b/plugins/discovery-azure-classic/licenses/httpclient-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/httpclient-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/httpclient-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/httpclient-NOTICE.txt b/plugins/discovery-azure-classic/licenses/httpclient-NOTICE.txt similarity index 100% rename from plugins/discovery-azure/licenses/httpclient-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/httpclient-NOTICE.txt diff --git a/plugins/discovery-azure/licenses/httpcore-4.4.4.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.4.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/httpcore-4.4.4.jar.sha1 rename to plugins/discovery-azure-classic/licenses/httpcore-4.4.4.jar.sha1 diff --git a/plugins/discovery-azure/licenses/httpcore-LICENSE.txt b/plugins/discovery-azure-classic/licenses/httpcore-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/httpcore-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/httpcore-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/httpcore-NOTICE.txt b/plugins/discovery-azure-classic/licenses/httpcore-NOTICE.txt similarity index 100% rename from plugins/discovery-azure/licenses/httpcore-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/httpcore-NOTICE.txt diff --git a/plugins/discovery-azure/licenses/jackson-LICENSE 
b/plugins/discovery-azure-classic/licenses/jackson-LICENSE similarity index 100% rename from plugins/discovery-azure/licenses/jackson-LICENSE rename to plugins/discovery-azure-classic/licenses/jackson-LICENSE diff --git a/plugins/discovery-azure/licenses/jackson-NOTICE b/plugins/discovery-azure-classic/licenses/jackson-NOTICE similarity index 100% rename from plugins/discovery-azure/licenses/jackson-NOTICE rename to plugins/discovery-azure-classic/licenses/jackson-NOTICE diff --git a/plugins/discovery-azure/licenses/jackson-core-asl-1.9.2.jar.sha1 b/plugins/discovery-azure-classic/licenses/jackson-core-asl-1.9.2.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/jackson-core-asl-1.9.2.jar.sha1 rename to plugins/discovery-azure-classic/licenses/jackson-core-asl-1.9.2.jar.sha1 diff --git a/plugins/discovery-azure/licenses/jackson-jaxrs-1.9.2.jar.sha1 b/plugins/discovery-azure-classic/licenses/jackson-jaxrs-1.9.2.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/jackson-jaxrs-1.9.2.jar.sha1 rename to plugins/discovery-azure-classic/licenses/jackson-jaxrs-1.9.2.jar.sha1 diff --git a/plugins/discovery-azure/licenses/jackson-mapper-asl-1.9.2.jar.sha1 b/plugins/discovery-azure-classic/licenses/jackson-mapper-asl-1.9.2.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/jackson-mapper-asl-1.9.2.jar.sha1 rename to plugins/discovery-azure-classic/licenses/jackson-mapper-asl-1.9.2.jar.sha1 diff --git a/plugins/discovery-azure/licenses/jackson-xc-1.9.2.jar.sha1 b/plugins/discovery-azure-classic/licenses/jackson-xc-1.9.2.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/jackson-xc-1.9.2.jar.sha1 rename to plugins/discovery-azure-classic/licenses/jackson-xc-1.9.2.jar.sha1 diff --git a/plugins/discovery-azure/licenses/javax.inject-1.jar.sha1 b/plugins/discovery-azure-classic/licenses/javax.inject-1.jar.sha1 similarity index 100% rename from 
plugins/discovery-azure/licenses/javax.inject-1.jar.sha1 rename to plugins/discovery-azure-classic/licenses/javax.inject-1.jar.sha1 diff --git a/plugins/discovery-azure/licenses/javax.inject-LICENSE.txt b/plugins/discovery-azure-classic/licenses/javax.inject-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/javax.inject-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/javax.inject-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/javax.inject-NOTICE.txt b/plugins/discovery-azure-classic/licenses/javax.inject-NOTICE.txt similarity index 100% rename from plugins/discovery-azure/licenses/javax.inject-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/javax.inject-NOTICE.txt diff --git a/plugins/discovery-azure/licenses/jaxb-LICENSE.txt b/plugins/discovery-azure-classic/licenses/jaxb-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/jaxb-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/jaxb-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/jaxb-NOTICE.txt b/plugins/discovery-azure-classic/licenses/jaxb-NOTICE.txt similarity index 100% rename from plugins/discovery-azure/licenses/jaxb-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/jaxb-NOTICE.txt diff --git a/plugins/discovery-azure/licenses/jaxb-api-2.2.2.jar.sha1 b/plugins/discovery-azure-classic/licenses/jaxb-api-2.2.2.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/jaxb-api-2.2.2.jar.sha1 rename to plugins/discovery-azure-classic/licenses/jaxb-api-2.2.2.jar.sha1 diff --git a/plugins/discovery-azure/licenses/jaxb-impl-2.2.3-1.jar.sha1 b/plugins/discovery-azure-classic/licenses/jaxb-impl-2.2.3-1.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/jaxb-impl-2.2.3-1.jar.sha1 rename to plugins/discovery-azure-classic/licenses/jaxb-impl-2.2.3-1.jar.sha1 diff --git a/plugins/discovery-azure/licenses/jersey-LICENSE.txt 
b/plugins/discovery-azure-classic/licenses/jersey-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/jersey-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/jersey-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/jersey-NOTICE.txt b/plugins/discovery-azure-classic/licenses/jersey-NOTICE.txt similarity index 100% rename from plugins/discovery-azure/licenses/jersey-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/jersey-NOTICE.txt diff --git a/plugins/discovery-azure/licenses/jersey-client-1.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/jersey-client-1.13.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/jersey-client-1.13.jar.sha1 rename to plugins/discovery-azure-classic/licenses/jersey-client-1.13.jar.sha1 diff --git a/plugins/discovery-azure/licenses/jersey-core-1.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/jersey-core-1.13.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/jersey-core-1.13.jar.sha1 rename to plugins/discovery-azure-classic/licenses/jersey-core-1.13.jar.sha1 diff --git a/plugins/discovery-azure/licenses/jersey-json-1.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/jersey-json-1.13.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/jersey-json-1.13.jar.sha1 rename to plugins/discovery-azure-classic/licenses/jersey-json-1.13.jar.sha1 diff --git a/plugins/discovery-azure/licenses/jettison-1.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/jettison-1.1.jar.sha1 rename to plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1 diff --git a/plugins/discovery-azure/licenses/jettison-LICENSE.txt b/plugins/discovery-azure-classic/licenses/jettison-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/jettison-LICENSE.txt rename to 
plugins/discovery-azure-classic/licenses/jettison-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/jettison-NOTICE.txt b/plugins/discovery-azure-classic/licenses/jettison-NOTICE.txt similarity index 100% rename from plugins/discovery-azure/licenses/jettison-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/jettison-NOTICE.txt diff --git a/plugins/discovery-azure/licenses/mail-1.4.5.jar.sha1 b/plugins/discovery-azure-classic/licenses/mail-1.4.5.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/mail-1.4.5.jar.sha1 rename to plugins/discovery-azure-classic/licenses/mail-1.4.5.jar.sha1 diff --git a/plugins/discovery-azure/licenses/mail-LICENSE.txt b/plugins/discovery-azure-classic/licenses/mail-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/mail-LICENSE.txt rename to plugins/discovery-azure-classic/licenses/mail-LICENSE.txt diff --git a/plugins/discovery-azure/licenses/mail-NOTICE.txt b/plugins/discovery-azure-classic/licenses/mail-NOTICE.txt similarity index 100% rename from plugins/discovery-azure/licenses/mail-NOTICE.txt rename to plugins/discovery-azure-classic/licenses/mail-NOTICE.txt diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureDiscoveryModule.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java similarity index 76% rename from plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureDiscoveryModule.java rename to plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java index 2c9c6e0a486..da684fd824d 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureDiscoveryModule.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java @@ -17,12 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.cloud.azure.classic; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cloud.azure.management.AzureComputeService; -import org.elasticsearch.cloud.azure.management.AzureComputeService.Management; -import org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceImpl; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; @@ -31,7 +30,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.plugin.discovery.azure.AzureDiscoveryPlugin; +import org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin; /** * Azure Module @@ -41,7 +40,7 @@ import org.elasticsearch.plugin.discovery.azure.AzureDiscoveryPlugin; * to AzureComputeServiceImpl. 
* * - * @see org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl + * @see AzureComputeServiceImpl */ public class AzureDiscoveryModule extends AbstractModule { protected final ESLogger logger; @@ -77,19 +76,19 @@ public class AzureDiscoveryModule extends AbstractModule { return false; } - if (isDefined(settings, Management.SUBSCRIPTION_ID_SETTING) && - isDefined(settings, Management.SERVICE_NAME_SETTING) && - isDefined(settings, Management.KEYSTORE_PATH_SETTING) && - isDefined(settings, Management.KEYSTORE_PASSWORD_SETTING)) { + if (isDefined(settings, AzureComputeService.Management.SUBSCRIPTION_ID_SETTING) && + isDefined(settings, AzureComputeService.Management.SERVICE_NAME_SETTING) && + isDefined(settings, AzureComputeService.Management.KEYSTORE_PATH_SETTING) && + isDefined(settings, AzureComputeService.Management.KEYSTORE_PASSWORD_SETTING)) { logger.trace("All required properties for Azure discovery are set!"); return true; } else { logger.debug("One or more Azure discovery settings are missing. " + "Check elasticsearch.yml file. 
Should have [{}], [{}], [{}] and [{}].", - Management.SUBSCRIPTION_ID_SETTING.getKey(), - Management.SERVICE_NAME_SETTING.getKey(), - Management.KEYSTORE_PATH_SETTING.getKey(), - Management.KEYSTORE_PASSWORD_SETTING.getKey()); + AzureComputeService.Management.SUBSCRIPTION_ID_SETTING.getKey(), + AzureComputeService.Management.SERVICE_NAME_SETTING.getKey(), + AzureComputeService.Management.KEYSTORE_PATH_SETTING.getKey(), + AzureComputeService.Management.KEYSTORE_PASSWORD_SETTING.getKey()); return false; } } diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureServiceDisableException.java similarity index 95% rename from plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java rename to plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureServiceDisableException.java index 487997d71b6..66488f90c31 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureServiceDisableException.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.cloud.azure.classic; public class AzureServiceDisableException extends IllegalStateException { public AzureServiceDisableException(String msg) { diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureServiceRemoteException.java similarity index 95% rename from plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java rename to plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureServiceRemoteException.java index 4bd4f1d67f1..c961c03ba71 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureServiceRemoteException.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.cloud.azure.classic; public class AzureServiceRemoteException extends IllegalStateException { public AzureServiceRemoteException(String msg) { diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeService.java similarity index 94% rename from plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java rename to plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeService.java index 526f98025b7..49e609aad80 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeService.java @@ -17,15 +17,15 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure.management; +package org.elasticsearch.cloud.azure.classic.management; import com.microsoft.windowsazure.core.utils.KeyStoreType; import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.azure.AzureUnicastHostsProvider; -import org.elasticsearch.discovery.azure.AzureUnicastHostsProvider.Deployment; +import org.elasticsearch.discovery.azure.classic.AzureUnicastHostsProvider; +import org.elasticsearch.discovery.azure.classic.AzureUnicastHostsProvider.Deployment; import java.net.URI; import java.net.URISyntaxException; diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java similarity index 97% rename from plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java rename to plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java index 076ce52ff80..7d6f81ad0b6 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure.management; +package org.elasticsearch.cloud.azure.classic.management; import com.microsoft.windowsazure.Configuration; import com.microsoft.windowsazure.core.Builder; @@ -28,7 +28,7 @@ import com.microsoft.windowsazure.management.compute.ComputeManagementService; import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import com.microsoft.windowsazure.management.configuration.ManagementConfiguration; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cloud.azure.AzureServiceRemoteException; +import org.elasticsearch.cloud.azure.classic.AzureServiceRemoteException; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java similarity index 96% rename from plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java rename to plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java index cb6c8238bf5..ed327a3a727 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.discovery.azure; +package org.elasticsearch.discovery.azure.classic; import com.microsoft.windowsazure.management.compute.models.DeploymentSlot; import com.microsoft.windowsazure.management.compute.models.DeploymentStatus; @@ -25,10 +25,10 @@ import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDeta import com.microsoft.windowsazure.management.compute.models.InstanceEndpoint; import com.microsoft.windowsazure.management.compute.models.RoleInstance; import org.elasticsearch.Version; -import org.elasticsearch.cloud.azure.AzureServiceDisableException; -import org.elasticsearch.cloud.azure.AzureServiceRemoteException; -import org.elasticsearch.cloud.azure.management.AzureComputeService; -import org.elasticsearch.cloud.azure.management.AzureComputeService.Discovery; +import org.elasticsearch.cloud.azure.classic.AzureServiceDisableException; +import org.elasticsearch.cloud.azure.classic.AzureServiceRemoteException; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService.Discovery; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java similarity index 82% rename from plugins/discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure/AzureDiscoveryPlugin.java rename to plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java index 9c1df5b4c8d..a7e1816fff0 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure/AzureDiscoveryPlugin.java +++ 
b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java @@ -17,18 +17,18 @@ * under the License. */ -package org.elasticsearch.plugin.discovery.azure; +package org.elasticsearch.plugin.discovery.azure.classic; -import org.elasticsearch.cloud.azure.AzureDiscoveryModule; -import org.elasticsearch.cloud.azure.management.AzureComputeService; +import org.elasticsearch.cloud.azure.classic.AzureDiscoveryModule; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.discovery.azure.AzureUnicastHostsProvider; +import org.elasticsearch.discovery.azure.classic.AzureUnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.plugins.Plugin; @@ -45,7 +45,9 @@ public class AzureDiscoveryPlugin extends Plugin { public AzureDiscoveryPlugin(Settings settings) { this.settings = settings; - logger.trace("starting azure discovery plugin..."); + DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + deprecationLogger.deprecated("azure classic discovery plugin is deprecated. 
Use azure arm discovery plugin instead"); + logger.trace("starting azure classic discovery plugin..."); } @Override diff --git a/plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureComputeServiceTestCase.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java similarity index 89% rename from plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureComputeServiceTestCase.java rename to plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java index 0c57ec3f16e..e9d16408537 100644 --- a/plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureComputeServiceTestCase.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java @@ -17,14 +17,14 @@ * under the License. */ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.cloud.azure.classic; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.cloud.azure.management.AzureComputeService.Discovery; -import org.elasticsearch.cloud.azure.management.AzureComputeService.Management; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService.Discovery; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService.Management; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; -import org.elasticsearch.plugin.discovery.azure.AzureDiscoveryPlugin; +import org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; diff --git a/plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AzureComputeServiceSimpleMock.java 
b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceSimpleMock.java similarity index 95% rename from plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AzureComputeServiceSimpleMock.java rename to plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceSimpleMock.java index 26843eba532..66e853b5953 100644 --- a/plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AzureComputeServiceSimpleMock.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceSimpleMock.java @@ -17,14 +17,14 @@ * under the License. */ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.cloud.azure.classic; import com.microsoft.windowsazure.management.compute.models.DeploymentSlot; import com.microsoft.windowsazure.management.compute.models.DeploymentStatus; import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import com.microsoft.windowsazure.management.compute.models.InstanceEndpoint; import com.microsoft.windowsazure.management.compute.models.RoleInstance; -import org.elasticsearch.cloud.azure.management.AzureComputeServiceAbstractMock; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceAbstractMock; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; diff --git a/plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AzureComputeServiceTwoNodesMock.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceTwoNodesMock.java similarity index 96% rename from plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AzureComputeServiceTwoNodesMock.java rename to 
plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceTwoNodesMock.java index bf7589c8836..d75ce22d55c 100644 --- a/plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AzureComputeServiceTwoNodesMock.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceTwoNodesMock.java @@ -17,14 +17,14 @@ * under the License. */ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.cloud.azure.classic; import com.microsoft.windowsazure.management.compute.models.DeploymentSlot; import com.microsoft.windowsazure.management.compute.models.DeploymentStatus; import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import com.microsoft.windowsazure.management.compute.models.InstanceEndpoint; import com.microsoft.windowsazure.management.compute.models.RoleInstance; -import org.elasticsearch.cloud.azure.management.AzureComputeServiceAbstractMock; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceAbstractMock; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; diff --git a/plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceAbstractMock.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceAbstractMock.java similarity index 96% rename from plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceAbstractMock.java rename to plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceAbstractMock.java index c11060a84a9..33f40a9159a 100644 --- a/plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceAbstractMock.java +++ 
b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceAbstractMock.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.cloud.azure.management; +package org.elasticsearch.cloud.azure.classic.management; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.component.AbstractLifecycleComponent; diff --git a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java similarity index 98% rename from plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureDiscoveryClusterFormationTests.java rename to plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index 0d1de07ed64..505f2d8b0db 100644 --- a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureDiscoveryClusterFormationTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -17,14 +17,14 @@ * under the License. 
*/ -package org.elasticsearch.discovery.azure; +package org.elasticsearch.discovery.azure.classic; import com.microsoft.windowsazure.management.compute.models.DeploymentSlot; import com.microsoft.windowsazure.management.compute.models.DeploymentStatus; import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpsConfigurator; import com.sun.net.httpserver.HttpsServer; -import org.elasticsearch.cloud.azure.management.AzureComputeService; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.Loggers; @@ -33,7 +33,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.node.Node; -import org.elasticsearch.plugin.discovery.azure.AzureDiscoveryPlugin; +import org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.TransportSettings; diff --git a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureDiscoveryRestIT.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryRestIT.java similarity index 96% rename from plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureDiscoveryRestIT.java rename to plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryRestIT.java index 131f73d1ca9..cb04842cb4a 100644 --- a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureDiscoveryRestIT.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryRestIT.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.discovery.azure; +package org.elasticsearch.discovery.azure.classic; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; diff --git a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureMinimumMasterNodesTests.java similarity index 94% rename from plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTests.java rename to plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureMinimumMasterNodesTests.java index 46c3f8af7c2..3acec43f7da 100644 --- a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureMinimumMasterNodesTests.java @@ -17,11 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.discovery.azure; +package org.elasticsearch.discovery.azure.classic; import org.apache.lucene.util.LuceneTestCase.AwaitsFix; -import org.elasticsearch.cloud.azure.AbstractAzureComputeServiceTestCase; -import org.elasticsearch.cloud.azure.AzureComputeServiceTwoNodesMock; +import org.elasticsearch.cloud.azure.classic.AbstractAzureComputeServiceTestCase; +import org.elasticsearch.cloud.azure.classic.AzureComputeServiceTwoNodesMock; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.discovery.zen.ZenDiscovery; diff --git a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java similarity index 89% rename from plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTests.java rename to plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java index 7a85909a13e..05bed1cefb1 100644 --- a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java @@ -17,12 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.discovery.azure; +package org.elasticsearch.discovery.azure.classic; -import org.elasticsearch.cloud.azure.AbstractAzureComputeServiceTestCase; -import org.elasticsearch.cloud.azure.AzureComputeServiceSimpleMock; -import org.elasticsearch.cloud.azure.management.AzureComputeService.Discovery; -import org.elasticsearch.cloud.azure.management.AzureComputeService.Management; +import org.elasticsearch.cloud.azure.classic.AbstractAzureComputeServiceTestCase; +import org.elasticsearch.cloud.azure.classic.AzureComputeServiceSimpleMock; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService.Discovery; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService.Management; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; diff --git a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java similarity index 89% rename from plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTests.java rename to plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java index 6431696c2eb..89c918769dc 100644 --- a/plugins/discovery-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java @@ -17,12 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.discovery.azure; +package org.elasticsearch.discovery.azure.classic; -import org.elasticsearch.cloud.azure.AbstractAzureComputeServiceTestCase; -import org.elasticsearch.cloud.azure.AzureComputeServiceTwoNodesMock; -import org.elasticsearch.cloud.azure.management.AzureComputeService.Discovery; -import org.elasticsearch.cloud.azure.management.AzureComputeService.Management; +import org.elasticsearch.cloud.azure.classic.AbstractAzureComputeServiceTestCase; +import org.elasticsearch.cloud.azure.classic.AzureComputeServiceTwoNodesMock; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService.Discovery; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService.Management; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; diff --git a/plugins/discovery-azure-classic/src/test/resources/rest-api-spec/test/discovery_azure_classic/10_basic.yaml b/plugins/discovery-azure-classic/src/test/resources/rest-api-spec/test/discovery_azure_classic/10_basic.yaml new file mode 100644 index 00000000000..ea042d8a52d --- /dev/null +++ b/plugins/discovery-azure-classic/src/test/resources/rest-api-spec/test/discovery_azure_classic/10_basic.yaml @@ -0,0 +1,13 @@ +# Integration tests for Azure Classic Discovery component +# +"Discovery Azure Classic loaded": + - do: + cluster.state: {} + + # Get master node id + - set: { master_node: master } + + - do: + nodes.info: {} + + - match: { nodes.$master.plugins.0.name: discovery-azure-classic } diff --git a/plugins/discovery-azure/src/test/resources/rest-api-spec/test/discovery_azure/10_basic.yaml b/plugins/discovery-azure/src/test/resources/rest-api-spec/test/discovery_azure/10_basic.yaml deleted file mode 100644 index 7a5acd1f001..00000000000 --- a/plugins/discovery-azure/src/test/resources/rest-api-spec/test/discovery_azure/10_basic.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Integration tests for Azure Discovery component -# 
-"Discovery Azure loaded": - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - - do: - nodes.info: {} - - - match: { nodes.$master.plugins.0.name: discovery-azure } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash index 336fa3ee305..c17df96937a 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash @@ -209,7 +209,7 @@ fi install_and_check_plugin discovery gce google-api-client-*.jar } -@test "[$GROUP] install discovery-azure plugin" { +@test "[$GROUP] install discovery-azure-classic plugin" { install_and_check_plugin discovery azure azure-core-*.jar } @@ -341,8 +341,8 @@ fi remove_plugin discovery-gce } -@test "[$GROUP] remove discovery-azure plugin" { - remove_plugin discovery-azure +@test "[$GROUP] remove discovery-azure-classic plugin" { + remove_plugin discovery-azure-classic } @test "[$GROUP] remove discovery-ec2 plugin" { diff --git a/settings.gradle b/settings.gradle index c0d7a72b2a5..6588b605a9d 100644 --- a/settings.gradle +++ b/settings.gradle @@ -31,7 +31,7 @@ List projects = [ 'plugins:analysis-phonetic', 'plugins:analysis-smartcn', 'plugins:analysis-stempel', - 'plugins:discovery-azure', + 'plugins:discovery-azure-classic', 'plugins:discovery-ec2', 'plugins:discovery-gce', 'plugins:ingest-geoip', From 4c2d6cf538da466ca469d8e68f25b7e5311616db Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 30 Jun 2016 14:43:28 +0200 Subject: [PATCH 16/36] percolator: removed unused code --- .../org/elasticsearch/percolator/PercolatorPlugin.java | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java 
b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java index a15acf67c02..4359568b3f6 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java @@ -21,8 +21,6 @@ package org.elasticsearch.percolator; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.Mapper; @@ -39,8 +37,6 @@ import java.util.Map; public class PercolatorPlugin extends Plugin implements MapperPlugin, ActionPlugin { - public static final String NAME = "percolator"; - private final Settings settings; public PercolatorPlugin(Settings settings) { @@ -65,7 +61,7 @@ public class PercolatorPlugin extends Plugin implements MapperPlugin, ActionPlug @Override public List> getSettings() { - return Arrays.asList(PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING); + return Collections.singletonList(PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING); } @Override @@ -73,7 +69,4 @@ public class PercolatorPlugin extends Plugin implements MapperPlugin, ActionPlug return Collections.singletonMap(PercolatorFieldMapper.CONTENT_TYPE, new PercolatorFieldMapper.TypeParser()); } - static boolean transportClientMode(Settings settings) { - return TransportClient.CLIENT_TYPE.equals(settings.get(Client.CLIENT_TYPE_SETTING_S.getKey())); - } } From a029c147a389daf445ae92bd46ddc009a136bfc4 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 15:16:51 +0200 Subject: [PATCH 17/36] Update plugin description --- plugins/discovery-azure-classic/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 42f6fd29106..3f68c3fcba5 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -20,7 +20,7 @@ import org.elasticsearch.gradle.LoggedExec */ esplugin { - description 'The Azure Discovery plugin allows to use Azure API for the unicast discovery mechanism' + description 'The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism' classname 'AzureDiscoveryPlugin' } From b5bb27cf902745b43a79b7f46fa67c12e1a09d96 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 30 Jun 2016 15:20:59 +0200 Subject: [PATCH 18/36] Bumped version to 5.0.0-alpha4 --- docs/reference/index.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 3a625ac1a2c..636475dfb47 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -1,7 +1,7 @@ [[elasticsearch-reference]] = Elasticsearch Reference -:version: 5.0.0-alpha3 +:version: 5.0.0-alpha4 :major-version: 5.x :branch: master :jdk: 1.8.0_73 From 7c7abc349c52ac1f5011abe47995f2b9c16f8a19 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 15:21:30 +0200 Subject: [PATCH 19/36] Fix checkstyle issues --- .../azure/classic/AzureMinimumMasterNodesTests.java | 12 ++++++++---- .../discovery/azure/classic/AzureSimpleTests.java | 6 ++++-- .../azure/classic/AzureTwoStartedNodesTests.java | 12 ++++++++---- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureMinimumMasterNodesTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureMinimumMasterNodesTests.java index 3acec43f7da..72e1f2da791 100644 --- 
a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureMinimumMasterNodesTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureMinimumMasterNodesTests.java @@ -63,20 +63,23 @@ public class AzureMinimumMasterNodesTests extends AbstractAzureComputeServiceTes logger.info("--> start data node / non master node"); internalCluster().startNode(); try { - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").get().getState().nodes().getMasterNodeId(), + nullValue()); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { // all is well, no master elected } logger.info("--> start another node"); internalCluster().startNode(); - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().getMasterNodeId(), notNullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").get().getState().nodes().getMasterNodeId(), + notNullValue()); logger.info("--> stop master node"); internalCluster().stopCurrentMasterNode(); try { - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").get().getState().nodes().getMasterNodeId(), + nullValue()); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { // all is well, no master elected @@ -84,6 +87,7 @@ public class AzureMinimumMasterNodesTests extends AbstractAzureComputeServiceTes logger.info("--> start another node"); internalCluster().startNode(); - 
assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().getMasterNodeId(), notNullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").get().getState().nodes().getMasterNodeId(), + notNullValue()); } } diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java index 05bed1cefb1..f507b1ea501 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java @@ -45,7 +45,8 @@ public class AzureSimpleTests extends AbstractAzureComputeServiceTestCase { logger.info("--> start one node"); internalCluster().startNode(settings); - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().getMasterNodeId(), notNullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").get().getState().nodes().getMasterNodeId(), + notNullValue()); // We expect having 1 node as part of the cluster, let's test that checkNumberOfNodes(1); @@ -58,7 +59,8 @@ public class AzureSimpleTests extends AbstractAzureComputeServiceTestCase { logger.info("--> start one node"); internalCluster().startNode(settings); - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().getMasterNodeId(), notNullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").get().getState().nodes().getMasterNodeId(), + notNullValue()); // We expect having 1 node as part of the cluster, let's test that checkNumberOfNodes(1); diff --git 
a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java index 89c918769dc..35844c9b383 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java @@ -46,11 +46,13 @@ public class AzureTwoStartedNodesTests extends AbstractAzureComputeServiceTestCa logger.info("--> start first node"); internalCluster().startNode(settings); - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().getMasterNodeId(), notNullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").get().getState().nodes().getMasterNodeId(), + notNullValue()); logger.info("--> start another node"); internalCluster().startNode(settings); - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().getMasterNodeId(), notNullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").get().getState().nodes().getMasterNodeId(), + notNullValue()); // We expect having 2 nodes as part of the cluster, let's test that checkNumberOfNodes(2); @@ -64,11 +66,13 @@ public class AzureTwoStartedNodesTests extends AbstractAzureComputeServiceTestCa logger.info("--> start first node"); internalCluster().startNode(settings); - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().getMasterNodeId(), notNullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").get().getState().nodes().getMasterNodeId(), + notNullValue()); logger.info("--> start another 
node"); internalCluster().startNode(settings); - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().getMasterNodeId(), notNullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").get().getState().nodes().getMasterNodeId(), + notNullValue()); // We expect having 2 nodes as part of the cluster, let's test that checkNumberOfNodes(2); From f3ddccad17c0b6281cab273009444151912a33fc Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 15:26:54 +0200 Subject: [PATCH 20/36] Fix documentation filenames --- ...iscovery-azure.asciidoc => discovery-azure-classic.asciidoc} | 2 +- docs/plugins/discovery.asciidoc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename docs/plugins/{discovery-azure.asciidoc => discovery-azure-classic.asciidoc} (99%) diff --git a/docs/plugins/discovery-azure.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc similarity index 99% rename from docs/plugins/discovery-azure.asciidoc rename to docs/plugins/discovery-azure-classic.asciidoc index 022c453e148..54dc2533807 100644 --- a/docs/plugins/discovery-azure.asciidoc +++ b/docs/plugins/discovery-azure-classic.asciidoc @@ -1,5 +1,5 @@ [[discovery-azure-classic]] -=== Azure Discovery Plugin +=== Azure Classic Discovery Plugin The Azure Classic Discovery plugin uses the Azure Classic API for unicast discovery. diff --git a/docs/plugins/discovery.asciidoc b/docs/plugins/discovery.asciidoc index ed9cfc0ed3d..999bf9c0e1f 100644 --- a/docs/plugins/discovery.asciidoc +++ b/docs/plugins/discovery.asciidoc @@ -13,7 +13,7 @@ The core discovery plugins are: The EC2 discovery plugin uses the https://github.com/aws/aws-sdk-java[AWS API] for unicast discovery. -<>:: +<>:: The Azure Classic discovery plugin uses the Azure Classic API for unicast discovery. 
From 72c220b1dff256b7e3b66695541a575ac955ed73 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 15:29:29 +0200 Subject: [PATCH 21/36] Add deprecation notice --- docs/plugins/discovery-azure-classic.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc index 54dc2533807..d794adbea7e 100644 --- a/docs/plugins/discovery-azure-classic.asciidoc +++ b/docs/plugins/discovery-azure-classic.asciidoc @@ -3,6 +3,10 @@ The Azure Classic Discovery plugin uses the Azure Classic API for unicast discovery. +// TODO: Link to ARM plugin when ready +// See issue https://github.com/elastic/elasticsearch/issues/19146 +deprecated[5.0.0, Use coming Azure ARM Discovery plugin instead] + [[discovery-azure-classic-install]] [float] ==== Installation From d78afc26eadc89d9ac04dd2a1e12bb32e615776b Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 15:30:49 +0200 Subject: [PATCH 22/36] Fix classname Package was removed by mistake --- plugins/discovery-azure-classic/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 3f68c3fcba5..88874968b21 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -21,7 +21,7 @@ import org.elasticsearch.gradle.LoggedExec esplugin { description 'The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism' - classname 'AzureDiscoveryPlugin' + classname 'org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin' } versions << [ From 648b7b82b48be2325b0336b7e556ae4628375d4d Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Jun 2016 15:32:52 +0200 Subject: [PATCH 23/36] Fix method name typo --- .../elasticsearch/discovery/azure/classic/AzureSimpleTests.java | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java index f507b1ea501..d2234632122 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java @@ -38,7 +38,7 @@ public class AzureSimpleTests extends AbstractAzureComputeServiceTestCase { super(AzureComputeServiceSimpleMock.TestPlugin.class); } - public void testOneNodeDhouldRunUsingPrivateIp() { + public void testOneNodeShouldRunUsingPrivateIp() { Settings.Builder settings = Settings.builder() .put(Management.SERVICE_NAME_SETTING.getKey(), "dummy") .put(Discovery.HOST_TYPE_SETTING.getKey(), "private_ip"); From e359be7632e5d65c2e08934bb2e5062db47d513f Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 29 Jun 2016 18:41:31 -0400 Subject: [PATCH 24/36] Don't inject TransportPercolateAction into RestPercolateAction Instead use the client. This will help us build the actions more easily in the future. 
--- .../resources/checkstyle_suppressions.xml | 1 - .../percolator/RestMultiPercolateAction.java | 8 +++----- .../percolator/RestPercolateAction.java | 20 +++++++++---------- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 5a08a834c3c..71f2cf85ab4 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -1168,7 +1168,6 @@ - diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestMultiPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestMultiPercolateAction.java index a2902a9a7c2..41de2de42d1 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestMultiPercolateAction.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestMultiPercolateAction.java @@ -36,13 +36,10 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestMultiPercolateAction extends BaseRestHandler { private final boolean allowExplicitIndex; - private final TransportMultiPercolateAction action; @Inject - public RestMultiPercolateAction(Settings settings, RestController controller, Client client, - TransportMultiPercolateAction action) { + public RestMultiPercolateAction(Settings settings, RestController controller, Client client) { super(settings, client); - this.action = action; controller.registerHandler(POST, "/_mpercolate", this); controller.registerHandler(POST, "/{index}/_mpercolate", this); controller.registerHandler(POST, "/{index}/{type}/_mpercolate", this); @@ -61,7 +58,8 @@ public class RestMultiPercolateAction extends BaseRestHandler { multiPercolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("index"))); multiPercolateRequest.documentType(restRequest.param("type")); 
multiPercolateRequest.add(RestActions.getRestContent(restRequest), allowExplicitIndex); - action.execute(multiPercolateRequest, new RestToXContentListener(restChannel)); + client.execute(MultiPercolateAction.INSTANCE, multiPercolateRequest, + new RestToXContentListener(restChannel)); } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java index b752cc55f6c..6dffd5518c8 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java @@ -36,13 +36,9 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestPercolateAction extends BaseRestHandler { - - private final TransportPercolateAction action; - @Inject - public RestPercolateAction(Settings settings, RestController controller, Client client, TransportPercolateAction action) { + public RestPercolateAction(Settings settings, RestController controller, Client client) { super(settings, client); - this.action = action; controller.registerHandler(GET, "/{index}/{type}/_percolate", this); controller.registerHandler(POST, "/{index}/{type}/_percolate", this); @@ -54,7 +50,8 @@ public class RestPercolateAction extends BaseRestHandler { controller.registerHandler(GET, "/{index}/{type}/_percolate/count", countHandler); controller.registerHandler(POST, "/{index}/{type}/_percolate/count", countHandler); - RestCountPercolateExistingDocHandler countExistingDocHandler = new RestCountPercolateExistingDocHandler(settings, controller, client); + RestCountPercolateExistingDocHandler countExistingDocHandler = new RestCountPercolateExistingDocHandler(settings, controller, + client); controller.registerHandler(GET, "/{index}/{type}/{id}/_percolate/count", countExistingDocHandler); 
controller.registerHandler(POST, "/{index}/{type}/{id}/_percolate/count", countExistingDocHandler); } @@ -67,10 +64,11 @@ public class RestPercolateAction extends BaseRestHandler { percolateRequest.source(RestActions.getRestContent(restRequest)); percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions())); - executePercolate(percolateRequest, restChannel); + executePercolate(client, percolateRequest, restChannel); } - void parseExistingDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, RestChannel restChannel, final Client client) { + void parseExistingDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, RestChannel restChannel, + final Client client) { String index = restRequest.param("index"); String type = restRequest.param("type"); percolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("percolate_index", index))); @@ -91,11 +89,11 @@ public class RestPercolateAction extends BaseRestHandler { percolateRequest.source(RestActions.getRestContent(restRequest)); percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions())); - executePercolate(percolateRequest, restChannel); + executePercolate(client, percolateRequest, restChannel); } - void executePercolate(final PercolateRequest percolateRequest, final RestChannel restChannel) { - action.execute(percolateRequest, new RestToXContentListener<>(restChannel)); + void executePercolate(final Client client, final PercolateRequest percolateRequest, final RestChannel restChannel) { + client.execute(PercolateAction.INSTANCE, percolateRequest, new RestToXContentListener<>(restChannel)); } @Override From d57b780bb465dad3f692e44834df484c697d51ca Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 29 Jun 2016 18:46:12 -0400 Subject: [PATCH 25/36] Remote TransportRethrottleAction from RestRethrottleAction Just use the client to call it. 
--- .../elasticsearch/index/reindex/RestRethrottleAction.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestRethrottleAction.java index 9841794ca2a..cb53509a461 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestRethrottleAction.java @@ -36,14 +36,11 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.action.admin.cluster.node.tasks.RestListTasksAction.nodeSettingListener; public class RestRethrottleAction extends BaseRestHandler { - private final TransportRethrottleAction action; private final ClusterService clusterService; @Inject - public RestRethrottleAction(Settings settings, RestController controller, Client client, TransportRethrottleAction action, - ClusterService clusterService) { + public RestRethrottleAction(Settings settings, RestController controller, Client client, ClusterService clusterService) { super(settings, client); - this.action = action; this.clusterService = clusterService; controller.registerHandler(POST, "/_update_by_query/{taskId}/_rethrottle", this); controller.registerHandler(POST, "/_delete_by_query/{taskId}/_rethrottle", this); @@ -60,6 +57,6 @@ public class RestRethrottleAction extends BaseRestHandler { } internalRequest.setRequestsPerSecond(requestsPerSecond); ActionListener listener = nodeSettingListener(clusterService, new RestToXContentListener<>(channel)); - action.execute(internalRequest, listener); + client.execute(RethrottleAction.INSTANCE, internalRequest, listener); } } From dc53ce929d8c54e50376d04f421bf9012d5ff137 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 30 Jun 2016 12:41:31 +0200 Subject: [PATCH 26/36] Document Update/Delete-By-Query with version number zero 
Update-By-Query and Delete-By-Query use internal versioning to update/delete documents. But documents can have a version number equal to zero using the external versioning... making the UBQ/DBQ request fail because zero is not a valid version number and they only support internal versioning for now. Sequence numbers might help to solve this issue in the future. --- docs/reference/docs/delete-by-query.asciidoc | 4 +++ docs/reference/docs/index_.asciidoc | 8 +++++ docs/reference/docs/update-by-query.asciidoc | 4 +++ .../test/delete_by_query/40_versioning.yaml | 29 +++++++++++++++++++ .../test/update_by_query/40_versioning.yaml | 27 +++++++++++++++++ 5 files changed, 72 insertions(+) create mode 100644 modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/40_versioning.yaml diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index 8b37e0a1220..b7f31ce4789 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -54,6 +54,10 @@ conflict if the document changes between the time when the snapshot was taken and when the delete request is processed. When the versions match the document is deleted. +NOTE: Since `internal` versioning does not support the value 0 as a valid +version number, documents with version equal to zero cannot be deleted using +`_delete_by_query` and will fail the request. + During the `_delete_by_query` execution, multiple search requests are sequentially executed in order to find all the matching documents to delete. Every time a batch of documents is found, a corresponding bulk request is executed to delete all diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index e1c260ae48f..dda75dd5aa5 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -119,6 +119,14 @@ indexed and the new version number used. 
If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. +WARNING: External versioning supports the value 0 as a valid version number. +This allows the version to be in sync with an external versioning system +where version numbers start from zero instead of one. It has the side effect +that documents with version number equal to zero cannot neither be updated +using the <> nor be deleted +using the <> as long as their +version number is equal to zero. + A nice side effect is that there is no need to maintain strict ordering of async indexing operations executed as a result of changes to a source database, as long as version numbers from the source database are used. diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 56ad1c7cd9a..06c20bcf07e 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -46,6 +46,10 @@ conflict if the document changes between the time when the snapshot was taken and when the index request is processed. When the versions match the document is updated and the version number is incremented. +NOTE: Since `internal` versioning does not support the value 0 as a valid +version number, documents with version equal to zero cannot be updated using +`_update_by_query` and will fail the request. + All update and query failures cause the `_update_by_query` to abort and are returned in the `failures` of the response. The updates that have been performed still stick. 
In other words, the process is not rolled back, only diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/40_versioning.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/40_versioning.yaml new file mode 100644 index 00000000000..c81305e2824 --- /dev/null +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/40_versioning.yaml @@ -0,0 +1,29 @@ +--- +"delete_by_query fails to delete documents with version number equal to zero": + - do: + index: + index: index1 + type: type1 + id: 1 + version: 0 # Starting version is zero + version_type: external + body: {"delete": 0} + - do: + indices.refresh: {} + + # Delete by query uses internal versioning and will fail here + # because zero is not allowed as a valid version number + - do: + catch: /illegal version value \[0\] for version type \[INTERNAL\]./ + delete_by_query: + index: index1 + refresh: true + body: + query: + match_all: {} + - do: + get: + index: index1 + type: type1 + id: 1 + - match: {_version: 0} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/40_versioning.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/40_versioning.yaml index ac1cbe4417e..1718714defd 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/40_versioning.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/40_versioning.yaml @@ -21,3 +21,30 @@ type: test id: 1 - match: {_version: 2} + +--- +"update_by_query fails to update documents with version number equal to zero": + - do: + index: + index: index1 + type: type1 + id: 1 + version: 0 # Starting version is zero + version_type: external + body: {"update": 0} + - do: + indices.refresh: {} + + # Update by query uses internal versioning and will fail here + # because zero is not allowed as a valid version number + - do: + catch: /illegal version value \[0\] for version type \[INTERNAL\]./ + 
update_by_query: + index: index1 + refresh: true + - do: + get: + index: index1 + type: type1 + id: 1 + - match: {_version: 0} From dbf1f61c5b9c568960e59a8f3087a793ae8f8978 Mon Sep 17 00:00:00 2001 From: jalvar08 Date: Thu, 30 Jun 2016 10:42:01 -0400 Subject: [PATCH 27/36] Fixing typo for path.conf location (#19098) Changing -Ees.path.conf to -Epath.conf --- docs/reference/setup/configuration.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc index 52bf5ffcbff..68f73fc96b8 100644 --- a/docs/reference/setup/configuration.asciidoc +++ b/docs/reference/setup/configuration.asciidoc @@ -26,7 +26,7 @@ setting, as follows: [source,sh] ------------------------------- -./bin/elasticsearch -Ees.path.conf=/path/to/my/config/ +./bin/elasticsearch -Epath.conf=/path/to/my/config/ ------------------------------- [float] From afb5e6332bd9add0b05dd73d3784cfda183c7bcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 30 Jun 2016 17:05:54 +0200 Subject: [PATCH 28/36] Make sure TimeIntervalRounding is monotonic for increasing dates (#19020) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently there are cases when using TimeIntervalRounding#round() and date1 < date2 that round(date2) < round(date1). These errors can happen when using a non-fixed time zone and the values to be rounded are slightly after a time zone offset change (e.g. DST transition). Here is an example for the "CET" time zone with a 45 minute rounding interval. The dates to be rounded are on the left (with utc time stamp), the rounded values on the right. 
The error case is marked: 2011-10-30T01:40:00.000+02:00 1319931600000 | 2011-10-30T01:30:00.000+02:00 1319931000000 2011-10-30T02:02:30.000+02:00 1319932950000 | 2011-10-30T01:30:00.000+02:00 1319931000000 2011-10-30T02:25:00.000+02:00 1319934300000 | 2011-10-30T02:15:00.000+02:00 1319933700000 2011-10-30T02:47:30.000+02:00 1319935650000 | 2011-10-30T02:15:00.000+02:00 1319933700000 2011-10-30T02:10:00.000+01:00 1319937000000 | 2011-10-30T01:30:00.000+02:00 1319931000000 * 2011-10-30T02:32:30.000+01:00 1319938350000 | 2011-10-30T02:15:00.000+01:00 1319937300000 2011-10-30T02:55:00.000+01:00 1319939700000 | 2011-10-30T02:15:00.000+01:00 1319937300000 2011-10-30T03:17:30.000+01:00 1319941050000 | 2011-10-30T03:00:00.000+01:00 1319940000000 We should correct this by detecting that we are crossing a transition when rounding, and in that case pick the largest valid rounded value before the transition. This change adds this correction logic to the rounding function and adds this invariant to the randomized TimeIntervalRounding tests. Also adding the example test case from above (with corrected behaviour) for illustrative purposes. 
--- .../common/rounding/TimeZoneRounding.java | 5 ++ .../rounding/TimeZoneRoundingTests.java | 79 +++++++++++++++---- 2 files changed, 67 insertions(+), 17 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java b/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java index 593b0484800..46362687e01 100644 --- a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java +++ b/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java @@ -222,6 +222,11 @@ public abstract class TimeZoneRounding extends Rounding { long roundedUTC; if (isInDSTGap(rounded) == false) { roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis); + // check if we crossed DST transition, in this case we want the last rounded value before the transition + long transition = timeZone.previousTransition(utcMillis); + if (transition != utcMillis && transition > roundedUTC) { + roundedUTC = roundKey(transition - 1); + } } else { /* * Edge case where the rounded local time is illegal and landed diff --git a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java index e82d37a5cf5..f9e5f6e3fbb 100644 --- a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java +++ b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.rounding; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.rounding.TimeZoneRounding.TimeIntervalRounding; import org.elasticsearch.common.rounding.TimeZoneRounding.TimeUnitRounding; import org.elasticsearch.common.unit.TimeValue; @@ -31,10 +32,13 @@ import org.joda.time.DateTimeConstants; import org.joda.time.DateTimeZone; import org.joda.time.format.ISODateTimeFormat; +import java.util.ArrayList; +import java.util.List; import 
java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -328,29 +332,70 @@ public class TimeZoneRoundingTests extends ESTestCase { long interval = unit.toMillis(randomIntBetween(1, 365)); DateTimeZone tz = randomDateTimeZone(); TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz); - long date = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 - try { - final long roundedDate = rounding.round(date); - final long nextRoundingValue = rounding.nextRoundingValue(roundedDate); - assertThat("Rounding should be idempotent", roundedDate, equalTo(rounding.round(roundedDate))); - assertThat("Rounded value smaller or equal than unrounded", roundedDate, lessThanOrEqualTo(date)); - assertThat("Values smaller than rounded value should round further down", rounding.round(roundedDate - 1), - lessThan(roundedDate)); + long mainDate = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 + if (randomBoolean()) { + mainDate = nastyDate(mainDate, tz, interval); + } + // check two intervals around date + long previousRoundedValue = Long.MIN_VALUE; + for (long date = mainDate - 2 * interval; date < mainDate + 2 * interval; date += interval / 2) { + try { + final long roundedDate = rounding.round(date); + final long nextRoundingValue = rounding.nextRoundingValue(roundedDate); + assertThat("Rounding should be idempotent", roundedDate, equalTo(rounding.round(roundedDate))); + assertThat("Rounded value smaller or equal than unrounded", roundedDate, lessThanOrEqualTo(date)); + assertThat("Values smaller than rounded value should round further down", rounding.round(roundedDate - 1), + lessThan(roundedDate)); + assertThat("Rounding should 
be >= previous rounding value", roundedDate, greaterThanOrEqualTo(previousRoundedValue)); - if (tz.isFixed()) { - assertThat("NextRounding value should be greater than date", nextRoundingValue, greaterThan(roundedDate)); - assertThat("NextRounding value should be interval from rounded value", nextRoundingValue - roundedDate, - equalTo(interval)); - assertThat("NextRounding value should be a rounded date", nextRoundingValue, - equalTo(rounding.round(nextRoundingValue))); + if (tz.isFixed()) { + assertThat("NextRounding value should be greater than date", nextRoundingValue, greaterThan(roundedDate)); + assertThat("NextRounding value should be interval from rounded value", nextRoundingValue - roundedDate, + equalTo(interval)); + assertThat("NextRounding value should be a rounded date", nextRoundingValue, + equalTo(rounding.round(nextRoundingValue))); + } + previousRoundedValue = roundedDate; + } catch (AssertionError e) { + logger.error("Rounding error at {}, timezone {}, interval: {},", new DateTime(date, tz), tz, interval); + throw e; } - } catch (AssertionError e) { - logger.error("Rounding error at {}, timezone {}, interval: {},", new DateTime(date, tz), tz, interval); - throw e; } } } + /** + * Test that rounded values are always greater or equal to last rounded value if date is increasing. 
+ * The example covers an interval around 2011-10-30T02:10:00+01:00, time zone CET, interval: 2700000ms + */ + public void testIntervalRoundingMonotonic_CET() { + long interval = TimeUnit.MINUTES.toMillis(45); + DateTimeZone tz = DateTimeZone.forID("CET"); + TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz); + List> expectedDates = new ArrayList>(); + // first date is the date to be rounded, second the expected result + expectedDates.add(new Tuple<>("2011-10-30T01:40:00.000+02:00", "2011-10-30T01:30:00.000+02:00")); + expectedDates.add(new Tuple<>("2011-10-30T02:02:30.000+02:00", "2011-10-30T01:30:00.000+02:00")); + expectedDates.add(new Tuple<>("2011-10-30T02:25:00.000+02:00", "2011-10-30T02:15:00.000+02:00")); + expectedDates.add(new Tuple<>("2011-10-30T02:47:30.000+02:00", "2011-10-30T02:15:00.000+02:00")); + expectedDates.add(new Tuple<>("2011-10-30T02:10:00.000+01:00", "2011-10-30T02:15:00.000+02:00")); + expectedDates.add(new Tuple<>("2011-10-30T02:32:30.000+01:00", "2011-10-30T02:15:00.000+01:00")); + expectedDates.add(new Tuple<>("2011-10-30T02:55:00.000+01:00", "2011-10-30T02:15:00.000+01:00")); + expectedDates.add(new Tuple<>("2011-10-30T03:17:30.000+01:00", "2011-10-30T03:00:00.000+01:00")); + + long previousDate = Long.MIN_VALUE; + for (Tuple dates : expectedDates) { + final long roundedDate = rounding.round(time(dates.v1())); + assertThat(roundedDate, isDate(time(dates.v2()), tz)); + assertThat(roundedDate, greaterThanOrEqualTo(previousDate)); + previousDate = roundedDate; + } + // here's what this means for interval widths + assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T02:15:00.000+02:00") - time("2011-10-30T01:30:00.000+02:00")); + assertEquals(TimeUnit.MINUTES.toMillis(60), time("2011-10-30T02:15:00.000+01:00") - time("2011-10-30T02:15:00.000+02:00")); + assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T03:00:00.000+01:00") - time("2011-10-30T02:15:00.000+01:00")); + } + /** * special test 
for DST switch from #9491 */ From 983a64c833cb727c3e0b67ebda7184ddc00c3502 Mon Sep 17 00:00:00 2001 From: jaymode Date: Thu, 30 Jun 2016 11:33:29 -0400 Subject: [PATCH 29/36] Add support for `teardown` section in REST tests This commits adds support for a `teardown` section that can be defined in REST tests to clean up any items that may have been created by the test and are not cleaned up by deletion of indices and templates. --- .../rest-api-spec/test/README.asciidoc | 15 ++- .../test/rest/ESRestTestCase.java | 15 ++- .../test/rest/RestTestCandidate.java | 5 + .../parser/RestTestSuiteParseContext.java | 16 ++++ .../test/rest/parser/RestTestSuiteParser.java | 2 + .../rest/parser/TeardownSectionParser.java | 53 +++++++++++ .../test/rest/section/RestTestSuite.java | 9 ++ .../test/rest/section/TeardownSection.java | 56 +++++++++++ .../test/rest/test/RestTestParserTests.java | 50 ++++++++-- .../rest/test/TeardownSectionParserTests.java | 93 +++++++++++++++++++ 10 files changed, 301 insertions(+), 13 deletions(-) create mode 100644 test/framework/src/main/java/org/elasticsearch/test/rest/parser/TeardownSectionParser.java create mode 100644 test/framework/src/main/java/org/elasticsearch/test/rest/section/TeardownSection.java create mode 100644 test/framework/src/test/java/org/elasticsearch/test/rest/test/TeardownSectionParserTests.java diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index 688d8cbdc5b..4e88cef4c9f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -20,6 +20,7 @@ Test file structure A YAML test file consists of: * an optional `setup` section, followed by +* an optional `teardown` section, followed by * one or more test sections For instance: @@ -28,6 +29,10 @@ For instance: - do: .... - do: .... + --- + teardown: + - do: .... 
+ --- "First test": - do: ... @@ -42,6 +47,11 @@ For instance: A `setup` section contains a list of commands to run before each test section in order to setup the same environment for each test section. +A `teardown` section contains a list of commands to run after each test +section in order to setup the same environment for each test section. This +may be needed for modifications made by the test that are not cleared by the +deletion of indices and templates. + A test section represents an independent test, containing multiple `do` statements and assertions. The contents of a test section must be run in order, but individual test sections may be run in any order, as follows: @@ -49,9 +59,8 @@ order, but individual test sections may be run in any order, as follows: 1. run `setup` (if any) 2. reset the `response` var and the `stash` (see below) 2. run test contents -3. run teardown - -The `teardown` should delete all indices and all templates. +3. run `teardown` (if any) +4. delete all indices and all templates Dot notation: ------------- diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 12e46087b6c..e8895aa90db 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -359,6 +359,9 @@ public abstract class ESRestTestCase extends ESTestCase { //skip test if the whole suite (yaml file) is disabled assumeFalse(buildSkipMessage(testCandidate.getSuitePath(), testCandidate.getSetupSection().getSkipSection()), testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion())); + //skip test if the whole suite (yaml file) is disabled + assumeFalse(buildSkipMessage(testCandidate.getSuitePath(), testCandidate.getTeardownSection().getSkipSection()), +
testCandidate.getTeardownSection().getSkipSection().skip(restTestExecutionContext.esVersion())); //skip test if test section is disabled assumeFalse(buildSkipMessage(testCandidate.getTestPath(), testCandidate.getTestSection().getSkipSection()), testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext.esVersion())); @@ -391,8 +394,16 @@ public abstract class ESRestTestCase extends ESTestCase { restTestExecutionContext.clear(); - for (ExecutableSection executableSection : testCandidate.getTestSection().getExecutableSections()) { - executableSection.execute(restTestExecutionContext); + try { + for (ExecutableSection executableSection : testCandidate.getTestSection().getExecutableSections()) { + executableSection.execute(restTestExecutionContext); + } + } finally { + logger.debug("start teardown test [{}]", testCandidate.getTestPath()); + for (DoSection doSection : testCandidate.getTeardownSection().getDoSections()) { + doSection.execute(restTestExecutionContext); + } + logger.debug("end teardown test [{}]", testCandidate.getTestPath()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java index e454c396a3d..57c7e1b1305 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.rest; import org.elasticsearch.test.rest.section.RestTestSuite; import org.elasticsearch.test.rest.section.SetupSection; +import org.elasticsearch.test.rest.section.TeardownSection; import org.elasticsearch.test.rest.section.TestSection; /** @@ -56,6 +57,10 @@ public class RestTestCandidate { return restTestSuite.getSetupSection(); } + public TeardownSection getTeardownSection() { + return restTestSuite.getTeardownSection(); + } + public TestSection getTestSection() { return 
testSection; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java index 0a0c2722020..e972aea641a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java @@ -25,11 +25,13 @@ import java.util.Map; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.test.rest.section.DoSection; import org.elasticsearch.test.rest.section.ExecutableSection; import org.elasticsearch.test.rest.section.ResponseBodyAssertion; import org.elasticsearch.test.rest.section.SetupSection; import org.elasticsearch.test.rest.section.SkipSection; +import org.elasticsearch.test.rest.section.TeardownSection; import org.elasticsearch.test.rest.section.TestSection; /** @@ -39,6 +41,7 @@ import org.elasticsearch.test.rest.section.TestSection; public class RestTestSuiteParseContext { private static final SetupSectionParser SETUP_SECTION_PARSER = new SetupSectionParser(); + private static final TeardownSectionParser TEARDOWN_SECTION_PARSER = new TeardownSectionParser(); private static final RestTestSectionParser TEST_SECTION_PARSER = new RestTestSectionParser(); private static final SkipSectionParser SKIP_SECTION_PARSER = new SkipSectionParser(); private static final DoSectionParser DO_SECTION_PARSER = new DoSectionParser(); @@ -93,6 +96,19 @@ public class RestTestSuiteParseContext { return SetupSection.EMPTY; } + public TeardownSection parseTeardownSection() throws IOException, RestTestParseException { + advanceToFieldName(); + + if ("teardown".equals(parser.currentName())) { + parser.nextToken(); + 
TeardownSection teardownSection = TEARDOWN_SECTION_PARSER.parse(this); + parser.nextToken(); + return teardownSection; + } + + return TeardownSection.EMPTY; + } + public TestSection parseTestSection() throws IOException, RestTestParseException { return TEST_SECTION_PARSER.parse(this); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java index d3f93939c2e..c6986d3eac8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java @@ -21,6 +21,7 @@ package org.elasticsearch.test.rest.parser; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.yaml.YamlXContent; import org.elasticsearch.test.rest.section.RestTestSuite; +import org.elasticsearch.test.rest.section.TeardownSection; import org.elasticsearch.test.rest.section.TestSection; import java.io.IOException; @@ -75,6 +76,7 @@ public class RestTestSuiteParser implements RestTestFragmentParser { + + @Override + public TeardownSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException { + XContentParser parser = parseContext.parser(); + + TeardownSection teardownSection = new TeardownSection(); + teardownSection.setSkipSection(parseContext.parseSkipSection()); + + while (parser.currentToken() != XContentParser.Token.END_ARRAY) { + parseContext.advanceToFieldName(); + if (!"do".equals(parser.currentName())) { + throw new RestTestParseException("section [" + parser.currentName() + "] not supported within teardown section"); + } + + parser.nextToken(); + teardownSection.addDoSection(parseContext.parseDoSection()); + parser.nextToken(); + } + + parser.nextToken(); + return teardownSection; + } +} diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java index d53671bc6bc..5c093be3fa0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java @@ -33,6 +33,7 @@ public class RestTestSuite { private final String name; private SetupSection setupSection; + private TeardownSection teardownSection; private Set testSections = new TreeSet<>(); @@ -61,6 +62,14 @@ public class RestTestSuite { this.setupSection = setupSection; } + public TeardownSection getTeardownSection() { + return teardownSection; + } + + public void setTeardownSection(TeardownSection teardownSection) { + this.teardownSection = teardownSection; + } + /** * Adds a {@link org.elasticsearch.test.rest.section.TestSection} to the REST suite * @return true if the test section was not already present, false otherwise diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/TeardownSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/TeardownSection.java new file mode 100644 index 00000000000..b3709472be5 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/TeardownSection.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest.section; + +import java.util.ArrayList; +import java.util.List; + +public class TeardownSection { + + public static final TeardownSection EMPTY; + + static { + EMPTY = new TeardownSection(); + EMPTY.setSkipSection(SkipSection.EMPTY); + } + + private SkipSection skipSection; + private List doSections = new ArrayList<>(); + + public SkipSection getSkipSection() { + return skipSection; + } + + public void setSkipSection(SkipSection skipSection) { + this.skipSection = skipSection; + } + + public List getDoSections() { + return doSections; + } + + public void addDoSection(DoSection doSection) { + this.doSections.add(doSection); + } + + public boolean isEmpty() { + return EMPTY.equals(this); + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java index 298f230d64a..76fbe8307e6 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java @@ -54,13 +54,30 @@ public class RestTestParserTests extends ESTestCase { parser.close(); } - public void testParseTestSetupAndSections() throws Exception { - parser = YamlXContent.yamlXContent.createParser( + public void testParseTestSetupTeardownAndSections() throws Exception { + final boolean includeSetup = randomBoolean(); + final boolean includeTeardown = randomBoolean(); + StringBuilder testSpecBuilder 
= new StringBuilder(); + if (includeSetup) { + testSpecBuilder + .append("---\n" + "setup:\n" + " - do:\n" + " indices.create:\n" + " index: test_index\n" + - "\n" + + "\n"); + } + if (includeTeardown) { + testSpecBuilder + .append("---\n" + + "teardown:\n" + + " - do:\n" + + " indices.delete:\n" + + " index: test_index\n" + + "\n"); + } + parser = YamlXContent.yamlXContent.createParser( + testSpecBuilder.toString() + "---\n" + "\"Get index mapping\":\n" + " - do:\n" + @@ -92,12 +109,29 @@ public class RestTestParserTests extends ESTestCase { assertThat(restTestSuite, notNullValue()); assertThat(restTestSuite.getName(), equalTo("suite")); assertThat(restTestSuite.getSetupSection(), notNullValue()); - assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(true)); + if (includeSetup) { + assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(false)); + assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(true)); + assertThat(restTestSuite.getSetupSection().getDoSections().size(), equalTo(1)); + assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getApi(), equalTo("indices.create")); + assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().size(), equalTo(1)); + assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().get("index"), equalTo("test_index")); + } else { + assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(true)); + } - assertThat(restTestSuite.getSetupSection().getDoSections().size(), equalTo(1)); - assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getApi(), equalTo("indices.create")); - assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().size(), equalTo(1)); - assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().get("index"), 
equalTo("test_index")); + assertThat(restTestSuite.getTeardownSection(), notNullValue()); + if (includeTeardown) { + assertThat(restTestSuite.getTeardownSection().isEmpty(), equalTo(false)); + assertThat(restTestSuite.getTeardownSection().getSkipSection().isEmpty(), equalTo(true)); + assertThat(restTestSuite.getTeardownSection().getDoSections().size(), equalTo(1)); + assertThat(restTestSuite.getTeardownSection().getDoSections().get(0).getApiCallSection().getApi(), equalTo("indices.delete")); + assertThat(restTestSuite.getTeardownSection().getDoSections().get(0).getApiCallSection().getParams().size(), equalTo(1)); + assertThat(restTestSuite.getTeardownSection().getDoSections().get(0).getApiCallSection().getParams().get("index"), + equalTo("test_index")); + } else { + assertThat(restTestSuite.getTeardownSection().isEmpty(), equalTo(true)); + } assertThat(restTestSuite.getTestSections().size(), equalTo(2)); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/test/TeardownSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/TeardownSectionParserTests.java new file mode 100644 index 00000000000..eeccea5f5e5 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/test/TeardownSectionParserTests.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest.test; + +import org.elasticsearch.Version; +import org.elasticsearch.common.xcontent.yaml.YamlXContent; +import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext; +import org.elasticsearch.test.rest.parser.TeardownSectionParser; +import org.elasticsearch.test.rest.section.TeardownSection; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +/** + * Unit tests for the teardown section parser + */ +public class TeardownSectionParserTests extends AbstractParserTestCase { + + public void testParseTeardownSection() throws Exception { + parser = YamlXContent.yamlXContent.createParser( + " - do:\n" + + " delete:\n" + + " index: foo\n" + + " type: doc\n" + + " id: 1\n" + + " ignore: 404\n" + + " - do:\n" + + " delete2:\n" + + " index: foo\n" + + " type: doc\n" + + " id: 1\n" + + " ignore: 404" + ); + + TeardownSectionParser teardownSectionParser = new TeardownSectionParser(); + TeardownSection section = teardownSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser)); + + assertThat(section, notNullValue()); + assertThat(section.getSkipSection().isEmpty(), equalTo(true)); + assertThat(section.getDoSections().size(), equalTo(2)); + assertThat(section.getDoSections().get(0).getApiCallSection().getApi(), equalTo("delete")); + assertThat(section.getDoSections().get(1).getApiCallSection().getApi(), equalTo("delete2")); + } + + public void testParseWithSkip() throws Exception { + parser = YamlXContent.yamlXContent.createParser( + " - skip:\n" + + " version: \"2.0.0 - 2.3.0\"\n" + + " reason: \"there is a reason\"\n" + + " - do:\n" + + " delete:\n" + + " index: foo\n" + + " type: doc\n" + + " id: 1\n" + + " ignore: 404\n" + + " - do:\n" + + " delete2:\n" + + " index: foo\n" + + " type: doc\n" + + " id: 1\n" + + " ignore: 404" + ); + + 
TeardownSectionParser teardownSectionParser = new TeardownSectionParser(); + TeardownSection section = teardownSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser)); + + assertThat(section, notNullValue()); + assertThat(section.getSkipSection().isEmpty(), equalTo(false)); + assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0)); + assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_2_3_0)); + assertThat(section.getSkipSection().getReason(), equalTo("there is a reason")); + assertThat(section.getDoSections().size(), equalTo(2)); + assertThat(section.getDoSections().get(0).getApiCallSection().getApi(), equalTo("delete")); + assertThat(section.getDoSections().get(1).getApiCallSection().getApi(), equalTo("delete2")); + } +} From e079c83020461ec58cfc24be85f58ab7ee407af1 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 30 Jun 2016 08:45:54 -0700 Subject: [PATCH 30/36] Fix test edge case for bytes reference --- .../common/bytes/AbstractBytesReferenceTestCase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java b/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java index dd9d93dd22a..76757e76724 100644 --- a/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java @@ -394,7 +394,7 @@ public abstract class AbstractBytesReferenceTestCase extends ESTestCase { public void testSliceArrayOffset() throws IOException { int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5)); BytesReference pbr = newBytesReference(length); - int sliceOffset = randomIntBetween(0, pbr.length()); + int sliceOffset = randomIntBetween(0, pbr.length() - 1); int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); BytesReference slice = 
pbr.slice(sliceOffset, sliceLength); if (slice.hasArray()) { From 04a4bcdca0374f1473b1264274954f4506e7403d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 30 Jun 2016 08:47:55 -0700 Subject: [PATCH 31/36] Add comment explaining bytes reference edge case --- .../common/bytes/AbstractBytesReferenceTestCase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java b/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java index 76757e76724..f31d7c69325 100644 --- a/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java @@ -394,7 +394,7 @@ public abstract class AbstractBytesReferenceTestCase extends ESTestCase { public void testSliceArrayOffset() throws IOException { int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5)); BytesReference pbr = newBytesReference(length); - int sliceOffset = randomIntBetween(0, pbr.length() - 1); + int sliceOffset = randomIntBetween(0, pbr.length() - 1); // an offset to the end would be len 0 int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); BytesReference slice = pbr.slice(sliceOffset, sliceLength); if (slice.hasArray()) { From 09ca6d6ed21fec5c41e42642d0d956d31e1cac25 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 30 Jun 2016 17:58:12 +0200 Subject: [PATCH 32/36] Add a BridgePartition to be used by testAckedIndexing (#19172) We have long worked to capture different partitioning scenarios in our testing infra. This PR adds a new variant, inspired by the Jepsen blogs, which was forgotten far - namely a partition where one node can still see and be seen by all other nodes. It also updates the resiliency page to better reflect all the work that was done in this area. 
--- .../TransportMasterNodeActionTests.java | 2 +- .../elasticsearch/common/BooleansTests.java | 8 +- .../DiscoveryWithServiceDisruptionsIT.java | 11 +-- docs/resiliency/index.asciidoc | 30 +++++++- .../index/reindex/ReindexScriptTests.java | 3 +- .../org/elasticsearch/test/ESTestCase.java | 9 ++- .../test/disruption/BridgePartition.java | 74 +++++++++++++++++++ 7 files changed, 120 insertions(+), 17 deletions(-) create mode 100644 test/framework/src/main/java/org/elasticsearch/test/disruption/BridgePartition.java diff --git a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index 32fe6b1e408..e15c869c6ff 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -244,7 +244,7 @@ public class TransportMasterNodeActionTests extends ESTestCase { Request request = new Request(); PlainActionFuture listener = new PlainActionFuture<>(); - setState(clusterService, ClusterStateCreationUtils.state(localNode, randomFrom(null, localNode, remoteNode), allNodes)); + setState(clusterService, ClusterStateCreationUtils.state(localNode, randomFrom(localNode, remoteNode, null), allNodes)); new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { @Override diff --git a/core/src/test/java/org/elasticsearch/common/BooleansTests.java b/core/src/test/java/org/elasticsearch/common/BooleansTests.java index 6e5446cebf9..176c4c75dc7 100644 --- a/core/src/test/java/org/elasticsearch/common/BooleansTests.java +++ b/core/src/test/java/org/elasticsearch/common/BooleansTests.java @@ -51,9 +51,9 @@ public class BooleansTests extends ESTestCase { assertThat(Booleans.parseBoolean(null, false), is(false)); assertThat(Booleans.parseBoolean(null, true), is(true)); - 
assertThat(Booleans.parseBoolean(randomFrom("true", "on", "yes", "1"), randomFrom(null, Boolean.TRUE, Boolean.FALSE)), is(true)); - assertThat(Booleans.parseBoolean(randomFrom("false", "off", "no", "0"), randomFrom(null, Boolean.TRUE, Boolean.FALSE)), is(false)); - assertThat(Booleans.parseBoolean(randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT),randomFrom(null, Boolean.TRUE, Boolean.FALSE)), is(true)); + assertThat(Booleans.parseBoolean(randomFrom("true", "on", "yes", "1"), randomFrom(Boolean.TRUE, Boolean.FALSE, null)), is(true)); + assertThat(Booleans.parseBoolean(randomFrom("false", "off", "no", "0"), randomFrom(Boolean.TRUE, Boolean.FALSE, null)), is(false)); + assertThat(Booleans.parseBoolean(randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT),randomFrom(Boolean.TRUE, Boolean.FALSE, null)), is(true)); assertThat(Booleans.parseBoolean(null, Boolean.FALSE), is(false)); assertThat(Booleans.parseBoolean(null, Boolean.TRUE), is(true)); assertThat(Booleans.parseBoolean(null, null), nullValue()); @@ -70,7 +70,7 @@ public class BooleansTests extends ESTestCase { assertThat(Booleans.parseBooleanExact(randomFrom("true", "on", "yes", "1")), is(true)); assertThat(Booleans.parseBooleanExact(randomFrom("false", "off", "no", "0")), is(false)); try { - Booleans.parseBooleanExact(randomFrom(null, "fred", "foo", "barney")); + Booleans.parseBooleanExact(randomFrom("fred", "foo", "barney", null)); fail("Expected exception while parsing invalid boolean value "); } catch (Exception ex) { assertTrue(ex instanceof IllegalArgumentException); diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 0187bb28f36..7f89acd169e 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -62,6 +62,7 @@ import 
org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration; import org.elasticsearch.test.disruption.BlockClusterStateProcessing; +import org.elasticsearch.test.disruption.BridgePartition; import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDelaysPartition; @@ -447,8 +448,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { final int seconds = !(TEST_NIGHTLY && rarely()) ? 1 : 5; final String timeout = seconds + "s"; - // TODO: add node count randomizaion - final List nodes = startCluster(3); + final List nodes = startCluster(rarely() ? 5 : 3); assertAcked(prepareCreate("test") .setSettings(Settings.builder() @@ -540,7 +540,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { logger.info("stopping disruption"); disruptionScheme.stopDisrupting(); for (String node : internalCluster().getNodeNames()) { - ensureStableCluster(3, TimeValue.timeValueMillis(disruptionScheme.expectedTimeToHeal().millis() + + ensureStableCluster(nodes.size(), TimeValue.timeValueMillis(disruptionScheme.expectedTimeToHeal().millis() + DISRUPTION_HEALING_OVERHEAD.millis()), true, node); } ensureGreen("test"); @@ -548,7 +548,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { logger.info("validating successful docs"); for (String node : nodes) { try { - logger.debug("validating through node [{}]", node); + logger.debug("validating through node [{}] ([{}] acked docs)", node, ackedDocs.size()); for (String id : ackedDocs.keySet()) { assertTrue("doc [" + id + "] indexed via node [" + ackedDocs.get(id) + "] not found", client(node).prepareGet("test", "type", id).setPreference("_local").get().isExists()); @@ -1192,7 +1192,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase 
{ new NetworkUnresponsivePartition(random()), new NetworkDelaysPartition(random()), new NetworkDisconnectPartition(random()), - new SlowClusterStateProcessing(random()) + new SlowClusterStateProcessing(random()), + new BridgePartition(random(), randomBoolean()) ); Collections.shuffle(list, random()); setDisruptionScheme(list.get(0)); diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc index 6f3ed169709..802c380b4a7 100644 --- a/docs/resiliency/index.asciidoc +++ b/docs/resiliency/index.asciidoc @@ -55,6 +55,14 @@ If you encounter an issue, https://github.com/elastic/elasticsearch/issues[pleas We are committed to tracking down and fixing all the issues that are posted. +[float] +==== Jepsen Tests + +The Jepsen platform is specifically designed to test distributed systems. It is not a single test and is regularly adapted +to create new scenarios. We have ported all published scenarios to our testing infrastructure. Of course +as the system evolves, new scenarios can come up that are not yet covered. We are committed to investigating all new scenarios and will +report issues that we find on this page and in our GitHub repository. + [float] === Better request retry mechanism when nodes are disconnected (STATUS: ONGOING) @@ -102,17 +110,31 @@ Indices stats and indices segments requests reach out to all nodes that have sha while the stats request arrives will make that part of the request fail and are just ignored in the overall stats result. {GIT}13719[#13719] [float] -=== Jepsen Test Failures (STATUS: ONGOING) +=== Documentation of guarantees and handling of failures (STATUS: ONGOING) -We have increased our test coverage to include scenarios tested by Jepsen. We make heavy use of randomization to expand on the scenarios that can be tested and to introduce new error conditions. 
You can follow the work on the master branch of the https://github.com/elastic/elasticsearch/blob/master/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java[`DiscoveryWithServiceDisruptionsIT` class], where we will add more tests as time progresses. +This status page is a start, but we can do a better job of explicitly documenting the processes at work in Elasticsearch and what happens +in the case of each type of failure. The plan is to have a test case that validates each behavior under simulated conditions. Every test + will document the expected results, the associated test code, and an explicit PASS or FAIL status for each simulated case. [float] -=== Document guarantees and handling of failure (STATUS: ONGOING) +=== Run Jepsen (STATUS: ONGOING) + +We have ported all of the known scenarios in the Jepsen blogs to our testing infrastructure. The new tests are run continuously in our +testing farm and are passing. We are also working on running Jepsen independently to verify that no failures are found. -This status page is a start, but we can do a better job of explicitly documenting the processes at work in Elasticsearch, and what happens in the case of each type of failure. The plan is to have a test case that validates each behavior under simulated conditions. Every test will document the expected results, the associated test code and an explicit PASS or FAIL status for each simulated case. == Unreleased +[float] +=== Port Jepsen tests to our testing framework (STATUS: UNRELEASED, V5.0.0) + +We have increased our test coverage to include scenarios tested by Jepsen, as described in the Elasticsearch related blogs. We make heavy +use of randomization to expand on the scenarios that can be tested and to introduce new error conditions. 
+You can follow the work on the master branch of the +https://github.com/elastic/elasticsearch/blob/master/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java[`DiscoveryWithServiceDisruptionsIT` class], +where the `testAckedIndexing` test was specifically added to cover known Jepsen related scenarios. + + [float] === Loss of documents during network partition (STATUS: UNRELEASED, v5.0.0) diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java index 74b7548cd63..c70b80b8e37 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptService; import java.util.Map; @@ -106,7 +105,7 @@ public class ReindexScriptTests extends AbstractAsyncBulkIndexByScrollActionScri } public void testSetTimestamp() throws Exception { - String timestamp = randomFrom(null, "now", "1234"); + String timestamp = randomFrom("now", "1234", null); IndexRequest index = applyScript((Map ctx) -> ctx.put("_timestamp", timestamp)); assertEquals(timestamp, index.timestamp()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index d04d12304de..6d63b6a5428 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -92,6 +92,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; 
import java.util.Map; +import java.util.Random; import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ExecutorService; @@ -344,9 +345,15 @@ public abstract class ESTestCase extends LuceneTestCase { /** Pick a random object from the given array. The array must not be empty. */ public static T randomFrom(T... array) { - return RandomPicks.randomFrom(random(), array); + return randomFrom(random(), array); } + /** Pick a random object from the given array. The array must not be empty. */ + public static T randomFrom(Random random, T... array) { + return RandomPicks.randomFrom(random, array); + } + + /** Pick a random object from the given list. */ public static T randomFrom(List list) { return RandomPicks.randomFrom(random(), list); diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/BridgePartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/BridgePartition.java new file mode 100644 index 00000000000..1a9c2b686c3 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/BridgePartition.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.test.disruption; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.transport.MockTransportService; + +import java.util.Random; + +import static org.elasticsearch.test.ESTestCase.randomFrom; + +/** + * A partition that breaks the cluster into two groups of nodes. The two groups are fully isolated + * with the exception of a single node that can see and be seen by all nodes in both groups. + */ +public class BridgePartition extends NetworkPartition { + + String bridgeNode; + final boolean unresponsive; + + public BridgePartition(Random random, boolean unresponsive) { + super(random); + this.unresponsive = unresponsive; + } + + @Override + public void applyToCluster(InternalTestCluster cluster) { + bridgeNode = randomFrom(random, cluster.getNodeNames()); + this.cluster = cluster; + for (String node: cluster.getNodeNames()) { + if (node.equals(bridgeNode) == false) { + super.applyToNode(node, cluster); + } + } + } + + @Override + public TimeValue expectedTimeToHeal() { + return TimeValue.timeValueSeconds(0); + } + + @Override + void applyDisruption(MockTransportService transportService1, MockTransportService transportService2) { + if (unresponsive) { + transportService1.addUnresponsiveRule(transportService2); + transportService2.addUnresponsiveRule(transportService1); + } else { + transportService1.addFailToSendNoConnectRule(transportService2); + transportService2.addFailToSendNoConnectRule(transportService1); + } + } + + @Override + protected String getPartitionDescription() { + return "bridge (super connected node: [" + bridgeNode + "], unresponsive [" + unresponsive + "])"; + } +} From e1ab3f16fd9d4372ba080b0ea7286adf97adef66 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 30 Jun 2016 18:32:15 +0200 Subject: [PATCH 33/36] Add link to alpha4 release notes --- docs/reference/release-notes.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff 
--git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 90492dee33e..ea535263b7d 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -5,12 +5,14 @@ -- This section summarizes the changes in each release. +* <> * <> * <> * <> * <> -- +include::release-notes/5.0.0-alpha4.asciidoc[] include::release-notes/5.0.0-alpha3.asciidoc[] include::release-notes/5.0.0-alpha2.asciidoc[] include::release-notes/5.0.0-alpha1.asciidoc[] From f5a269b029c4a357e257d7ef9b849470e76a2ddf Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 27 Jun 2016 11:21:45 -0400 Subject: [PATCH 34/36] Start migration away from aggregation streams We'll migrate to NamedWriteable so we can share code with the rest of the system. So we can work on this in multiple pull requests without breaking Elasticsearch in between the commits this change supports *both* old style `InternalAggregations.stream` serialization and `NamedWriteable` style serialization. As such it creates about a half dozen `// NORELEASE` comments that will have to be removed once the migration is complete. This also introduces a boolean `transportClient` flag to `SearchModule` which is used to skip inappropriate registrations for the transport client while still registering the things it needs. In this case that means that the `InternalAggregation` subclasses are registered with the `NamedWriteableRegistry` but the `AggregationBuilder` subclasses are not. Finally, this moves aggregation registration from guice configuration time to `SearchModule` construction time. This will make it simpler to work with in the future as we further clean up Elasticsearch's extension points.
--- .../client/transport/TransportClient.java | 7 +- .../java/org/elasticsearch/node/Node.java | 2 +- .../elasticsearch/search/SearchModule.java | 48 +++++--- .../aggregations/InternalAggregation.java | 111 ++++++++++++------ .../aggregations/InternalAggregations.java | 23 +++- .../metrics/InternalMetricsAggregation.java | 10 ++ .../InternalNumericMetricsAggregation.java | 22 ++++ .../metrics/avg/AvgAggregationBuilder.java | 8 +- .../aggregations/metrics/avg/InternalAvg.java | 61 ++++------ .../index/query/InnerHitBuilderTests.java | 2 +- .../search/SearchModuleTests.java | 10 +- .../aggregations/AggregatorParsingTests.java | 2 +- .../aggregations/BaseAggregationTestCase.java | 2 +- .../SignificanceHeuristicTests.java | 4 +- .../builder/SearchSourceBuilderTests.java | 2 +- .../highlight/HighlightBuilderTests.java | 2 +- .../rescore/QueryRescoreBuilderTests.java | 2 +- .../search/sort/AbstractSortTestCase.java | 2 +- .../search/sort/SortBuilderTests.java | 2 +- .../AbstractSuggestionBuilderTestCase.java | 2 +- .../messy/tests/TemplateQueryParserTests.java | 2 +- .../test/AbstractQueryTestCase.java | 2 +- .../hamcrest/ElasticsearchAssertions.java | 2 +- 23 files changed, 204 insertions(+), 126 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java index c9313fc08c4..eeb9373c435 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -142,12 +142,7 @@ public class TransportClient extends AbstractClient { } modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry)); modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool)); - modules.add(new SearchModule(settings, namedWriteableRegistry) { - @Override - protected void configure() { - // noop - } - }); + modules.add(new SearchModule(settings, 
namedWriteableRegistry, true)); modules.add(new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(), pluginsService.filterPlugins(ActionPlugin.class))); diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index ff095f749c8..fa7f090c096 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -267,7 +267,7 @@ public class Node implements Closeable { ClusterModule clusterModule = new ClusterModule(settings, clusterService); modules.add(clusterModule); modules.add(new IndicesModule(namedWriteableRegistry, pluginsService.filterPlugins(MapperPlugin.class))); - modules.add(new SearchModule(settings, namedWriteableRegistry)); + modules.add(new SearchModule(settings, namedWriteableRegistry, false)); modules.add(new ActionModule(DiscoveryNode.isIngestNode(settings), false, settings, clusterModule.getIndexNameExpressionResolver(), settingsModule.getClusterSettings(), pluginsService.filterPlugins(ActionPlugin.class))); diff --git a/core/src/main/java/org/elasticsearch/search/SearchModule.java b/core/src/main/java/org/elasticsearch/search/SearchModule.java index 34a90894425..cd9d6f8a3f0 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/core/src/main/java/org/elasticsearch/search/SearchModule.java @@ -94,6 +94,7 @@ import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorParsers; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.children.ChildrenAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.children.InternalChildren; @@ -264,6 +265,7 @@ import 
java.util.Set; */ public class SearchModule extends AbstractModule { + private final boolean transportClient; private final Highlighters highlighters; private final Suggesters suggesters; private final ParseFieldRegistry> scoreFunctionParserRegistry = new ParseFieldRegistry<>("score_function"); @@ -287,9 +289,10 @@ public class SearchModule extends AbstractModule { // pkg private so tests can mock Class searchServiceImpl = SearchService.class; - public SearchModule(Settings settings, NamedWriteableRegistry namedWriteableRegistry) { + public SearchModule(Settings settings, NamedWriteableRegistry namedWriteableRegistry, boolean transportClient) { this.settings = settings; this.namedWriteableRegistry = namedWriteableRegistry; + this.transportClient = transportClient; suggesters = new Suggesters(namedWriteableRegistry); highlighters = new Highlighters(settings); registerBuiltinScoreFunctionParsers(); @@ -300,6 +303,7 @@ public class SearchModule extends AbstractModule { registerBuiltinSignificanceHeuristics(); registerBuiltinMovingAverageModels(); registerBuiltinSubFetchPhases(); + registerBuiltinAggregations(); } public void registerHighlighter(String key, Highlighter highligher) { @@ -414,15 +418,27 @@ public class SearchModule extends AbstractModule { /** * Register an aggregation. * - * @param reader reads the aggregation builder from a stream + * @param builderReader reads the {@link AggregationBuilder} from a stream + * @param internalReader reads the {@link InternalAggregation} from a stream * @param aggregationParser reads the aggregation builder from XContent * @param aggregationName names by which the aggregation may be parsed. The first name is special because it is the name that the reader * is registered under. 
*/ - public void registerAggregation(Writeable.Reader reader, Aggregator.Parser aggregationParser, - ParseField aggregationName) { + public void registerAggregation(Writeable.Reader builderReader, + Writeable.Reader internalReader, Aggregator.Parser aggregationParser, + ParseField aggregationName) { + if (false == transportClient) { + namedWriteableRegistry.register(AggregationBuilder.class, aggregationName.getPreferredName(), builderReader); + aggregationParserRegistry.register(aggregationParser, aggregationName); + } + namedWriteableRegistry.register(InternalAggregation.class, aggregationName.getPreferredName(), internalReader); + } + + public void registerAggregation(Writeable.Reader builderReader, Aggregator.Parser aggregationParser, + ParseField aggregationName) { + // NORELEASE remove me in favor of the above method + namedWriteableRegistry.register(AggregationBuilder.class, aggregationName.getPreferredName(), builderReader); aggregationParserRegistry.register(aggregationParser, aggregationName); - namedWriteableRegistry.register(AggregationBuilder.class, aggregationName.getPreferredName(), reader); } /** @@ -441,15 +457,21 @@ public class SearchModule extends AbstractModule { @Override protected void configure() { - bind(IndicesQueriesRegistry.class).toInstance(queryParserRegistry); - bind(Suggesters.class).toInstance(suggesters); - configureSearch(); - configureAggs(); - configureShapes(); + if (false == transportClient) { + /* + * Nothing is bound for transport client *but* SearchModule is still responsible for settings up the things like the + * NamedWriteableRegistry. 
+ */ + bind(IndicesQueriesRegistry.class).toInstance(queryParserRegistry); + bind(Suggesters.class).toInstance(suggesters); + configureSearch(); + configureShapes(); + bind(AggregatorParsers.class).toInstance(aggregatorParsers); + } } - protected void configureAggs() { - registerAggregation(AvgAggregationBuilder::new, new AvgParser(), AvgAggregationBuilder.AGGREGATION_NAME_FIELD); + private void registerBuiltinAggregations() { + registerAggregation(AvgAggregationBuilder::new, InternalAvg::new, new AvgParser(), AvgAggregationBuilder.AGGREGATION_NAME_FIELD); registerAggregation(SumAggregationBuilder::new, new SumParser(), SumAggregationBuilder.AGGREGATION_NAME_FIELD); registerAggregation(MinAggregationBuilder::new, new MinParser(), MinAggregationBuilder.AGGREGATION_NAME_FIELD); registerAggregation(MaxAggregationBuilder::new, new MaxParser(), MaxAggregationBuilder.AGGREGATION_NAME_FIELD); @@ -527,7 +549,6 @@ public class SearchModule extends AbstractModule { BucketSelectorPipelineAggregationBuilder.AGGREGATION_NAME_FIELD); registerPipelineAggregation(SerialDiffPipelineAggregationBuilder::new, SerialDiffPipelineAggregationBuilder::parse, SerialDiffPipelineAggregationBuilder.AGGREGATION_NAME_FIELD); - bind(AggregatorParsers.class).toInstance(aggregatorParsers); } protected void configureSearch() { @@ -679,7 +700,6 @@ public class SearchModule extends AbstractModule { static { // calcs - InternalAvg.registerStreams(); InternalSum.registerStreams(); InternalMin.registerStreams(); InternalMax.registerStreams(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index b4fab7093f8..b75244ce889 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -19,10 +19,10 @@ package org.elasticsearch.search.aggregations; import 
org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -43,8 +43,8 @@ import java.util.Map; /** * An internal implementation of {@link Aggregation}. Serves as a base class for all aggregation implementations. */ -public abstract class InternalAggregation implements Aggregation, ToXContent, Streamable { - +public abstract class InternalAggregation implements Aggregation, ToXContent, Streamable, NamedWriteable { + // NORELEASE remove Streamable /** * The aggregation type that holds all the string types that are associated with an aggregation: @@ -139,15 +139,84 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St this.metaData = metaData; } + /** + * Read from a stream. 
+ */ + protected InternalAggregation(StreamInput in) throws IOException { + name = in.readString(); + metaData = in.readMap(); + int size = in.readVInt(); + if (size == 0) { + pipelineAggregators = Collections.emptyList(); + } else { + pipelineAggregators = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + BytesReference type = in.readBytesReference(); + PipelineAggregator pipelineAggregator = PipelineAggregatorStreams.stream(type).readResult(in); + pipelineAggregators.add(pipelineAggregator); + } + } + } + + @Override + public final void readFrom(StreamInput in) throws IOException { + try { + getWriteableName(); // Throws UnsupportedOperationException if this aggregation should be read using old style Streams + assert false : "Used reading constructor instead"; + } catch (UnsupportedOperationException e) { + // OK + } + name = in.readString(); + metaData = in.readMap(); + int size = in.readVInt(); + if (size == 0) { + pipelineAggregators = Collections.emptyList(); + } else { + pipelineAggregators = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + BytesReference type = in.readBytesReference(); + PipelineAggregator pipelineAggregator = PipelineAggregatorStreams.stream(type).readResult(in); + pipelineAggregators.add(pipelineAggregator); + } + } + doReadFrom(in); + } + + protected void doReadFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("Use reading constructor instead"); // NORELEASE remove when we remove Streamable + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + out.writeString(name); // NORELEASE remote writing the name - it is automatically handled with writeNamedWriteable + out.writeGenericValue(metaData); + out.writeVInt(pipelineAggregators.size()); + for (PipelineAggregator pipelineAggregator : pipelineAggregators) { + out.writeBytesReference(pipelineAggregator.type().stream()); + pipelineAggregator.writeTo(out); + } + doWriteTo(out); + } + + protected abstract 
void doWriteTo(StreamOutput out) throws IOException; + @Override public String getName() { return name; } + @Override + public String getWriteableName() { + // NORELEASE remove me when all InternalAggregations override it + throw new UnsupportedOperationException("Override on every class"); + } + /** * @return The {@link Type} of this aggregation */ - public abstract Type type(); + public Type type() { + throw new UnsupportedOperationException("Use getWriteableName instead"); // NORELEASE remove me + } /** * Reduces the given addAggregation to a single one and returns it. In most cases, the assumption will be the all given @@ -214,40 +283,6 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St public abstract XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException; - @Override - public final void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - out.writeGenericValue(metaData); - out.writeVInt(pipelineAggregators.size()); - for (PipelineAggregator pipelineAggregator : pipelineAggregators) { - out.writeBytesReference(pipelineAggregator.type().stream()); - pipelineAggregator.writeTo(out); - } - doWriteTo(out); - } - - protected abstract void doWriteTo(StreamOutput out) throws IOException; - - @Override - public final void readFrom(StreamInput in) throws IOException { - name = in.readString(); - metaData = in.readMap(); - int size = in.readVInt(); - if (size == 0) { - pipelineAggregators = Collections.emptyList(); - } else { - pipelineAggregators = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - BytesReference type = in.readBytesReference(); - PipelineAggregator pipelineAggregator = PipelineAggregatorStreams.stream(type).readResult(in); - pipelineAggregators.add(pipelineAggregator); - } - } - doReadFrom(in); - } - - protected abstract void doReadFrom(StreamInput in) throws IOException; - /** * Common xcontent fields that are shared among addAggregation */ diff --git 
a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index 357886a69f6..75223dcc476 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -205,9 +205,14 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl } else { aggregations = new ArrayList<>(size); for (int i = 0; i < size; i++) { - BytesReference type = in.readBytesReference(); - InternalAggregation aggregation = AggregationStreams.stream(type).readResult(in); - aggregations.add(aggregation); + // NORELEASE temporary hack to support old style streams and new style NamedWriteable at the same time + if (in.readBoolean()) { + aggregations.add(in.readNamedWriteable(InternalAggregation.class)); + } else { + BytesReference type = in.readBytesReference(); + InternalAggregation aggregation = AggregationStreams.stream(type).readResult(in); + aggregations.add(aggregation); + } } } } @@ -217,8 +222,16 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl out.writeVInt(aggregations.size()); for (Aggregation aggregation : aggregations) { InternalAggregation internal = (InternalAggregation) aggregation; - out.writeBytesReference(internal.type().stream()); - internal.writeTo(out); + // NORELEASE Temporary hack to support old style streams and new style NamedWriteable at the same time + try { + internal.getWriteableName(); // Throws UnsupportedOperationException if we should use old style streams. 
+ out.writeBoolean(true); + out.writeNamedWriteable(internal); + } catch (UnsupportedOperationException e) { + out.writeBoolean(false); + out.writeBytesReference(internal.type().stream()); + internal.writeTo(out); + } } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java index c67b7dc6b2f..5c2b1c46f5e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java @@ -19,9 +19,11 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import java.io.IOException; import java.util.List; import java.util.Map; @@ -32,4 +34,12 @@ public abstract class InternalMetricsAggregation extends InternalAggregation { protected InternalMetricsAggregation(String name, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); } + + /** + * Read from a stream. 
+ */ + protected InternalMetricsAggregation(StreamInput in) throws IOException { + super(in); + } + } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java index 02412ba81c6..15c9d7ed59c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java @@ -18,9 +18,11 @@ */ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import java.io.IOException; import java.util.List; import java.util.Map; @@ -41,6 +43,13 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA super(name, pipelineAggregators, metaData); } + /** + * Read from a stream. + */ + protected SingleValue(StreamInput in) throws IOException { + super(in); + } + @Override public String getValueAsString() { return format.format(value()); @@ -67,6 +76,13 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA super(name, pipelineAggregators, metaData); } + /** + * Read from a stream. + */ + protected MultiValue(StreamInput in) throws IOException { + super(in); + } + public abstract double value(String name); public String valueAsString(String name) { @@ -91,4 +107,10 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA super(name, pipelineAggregators, metaData); } + /** + * Read from a stream. 
+ */ + protected InternalNumericMetricsAggregation(StreamInput in) throws IOException { + super(in); + } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java index ce098177a0b..830ce35fde7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregationBuilder.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; @@ -36,18 +37,19 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; public class AvgAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly { - public static final String NAME = InternalAvg.TYPE.name(); + public static final String NAME = "avg"; + private final static Type TYPE = new Type(NAME); public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME); public AvgAggregationBuilder(String name) { - super(name, InternalAvg.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(name, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } /** * Read from a stream. 
*/ public AvgAggregationBuilder(StreamInput in) throws IOException { - super(in, InternalAvg.TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(in, TYPE, ValuesSourceType.NUMERIC, ValueType.NUMERIC); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java index bb4260b8189..5f0d54db003 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -31,30 +30,9 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** -* -*/ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue implements Avg { - - public final static Type TYPE = new Type("avg"); - - public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() { - @Override - public InternalAvg readResult(StreamInput in) throws IOException { - InternalAvg result = new InternalAvg(); - result.readFrom(in); - return result; - } - }; - - public static void registerStreams() { - AggregationStreams.registerStream(STREAM, TYPE.stream()); - } - - private double sum; - private long count; - - InternalAvg() {} // for serialization + private final double sum; + private final long count; public InternalAvg(String name, double sum, long count, DocValueFormat 
format, List pipelineAggregators, Map metaData) { @@ -64,6 +42,23 @@ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue i this.format = format; } + /** + * Read from a stream. + */ + public InternalAvg(StreamInput in) throws IOException { + super(in); + format = in.readNamedWriteable(DocValueFormat.class); + sum = in.readDouble(); + count = in.readVLong(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(format); + out.writeDouble(sum); + out.writeVLong(count); + } + @Override public double value() { return getValue(); @@ -75,8 +70,8 @@ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue i } @Override - public Type type() { - return TYPE; + public String getWriteableName() { + return AvgAggregationBuilder.NAME; } @Override @@ -90,20 +85,6 @@ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue i return new InternalAvg(getName(), sum, count, format, pipelineAggregators(), getMetaData()); } - @Override - protected void doReadFrom(StreamInput in) throws IOException { - format = in.readNamedWriteable(DocValueFormat.class); - sum = in.readDouble(); - count = in.readVLong(); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(format); - out.writeDouble(sum); - out.writeVLong(count); - } - @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.field(CommonFields.VALUE, count != 0 ? 
getValue() : null); diff --git a/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java index d4ba6ca9062..6460d8505ee 100644 --- a/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java @@ -67,7 +67,7 @@ public class InnerHitBuilderTests extends ESTestCase { @BeforeClass public static void init() { namedWriteableRegistry = new NamedWriteableRegistry(); - indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).getQueryParserRegistry(); + indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry, false).getQueryParserRegistry(); } @AfterClass diff --git a/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 72873bc0d48..20a557d56a6 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -47,7 +47,7 @@ import static org.hamcrest.Matchers.notNullValue; public class SearchModuleTests extends ModuleTestCase { public void testDoubleRegister() { - SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry()); + SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false); try { module.registerHighlighter("fvh", new PlainHighlighter()); } catch (IllegalArgumentException e) { @@ -62,7 +62,7 @@ public class SearchModuleTests extends ModuleTestCase { } public void testRegisterSuggester() { - SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry()); + SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false); module.registerSuggester("custom", CustomSuggester.INSTANCE); IllegalArgumentException e = 
expectThrows(IllegalArgumentException.class, () -> module.registerSuggester("custom", CustomSuggester.INSTANCE)); @@ -70,7 +70,7 @@ public class SearchModuleTests extends ModuleTestCase { } public void testRegisterHighlighter() { - SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry()); + SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false); CustomHighlighter customHighlighter = new CustomHighlighter(); module.registerHighlighter("custom", customHighlighter); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, @@ -88,14 +88,14 @@ public class SearchModuleTests extends ModuleTestCase { } public void testRegisterQueryParserDuplicate() { - SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry()); + SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> module .registerQuery(TermQueryBuilder::new, TermQueryBuilder::fromXContent, TermQueryBuilder.QUERY_NAME_FIELD)); assertThat(e.getMessage(), containsString("] already registered for [query][term] while trying to register [org.elasticsearch.")); } public void testRegisteredQueries() throws IOException { - SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry()); + SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry(), false); List allSupportedQueries = new ArrayList<>(); Collections.addAll(allSupportedQueries, NON_DEPRECATED_QUERIES); Collections.addAll(allSupportedQueries, DEPRECATED_QUERIES); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java index 1a21069623d..58b4b97db5c 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java +++ 
b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java @@ -119,7 +119,7 @@ public class AggregatorParsingTests extends ESTestCase { protected void configure() { bindMapperExtension(); } - }, new SearchModule(settings, namedWriteableRegistry) { + }, new SearchModule(settings, namedWriteableRegistry, false) { @Override protected void configureSearch() { // Skip me diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java index 4e0429fca87..a4103e7ee56 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java @@ -151,7 +151,7 @@ public abstract class BaseAggregationTestCase heuristicParserMapper = searchModule.getSignificanceHeuristicParserRegistry(); SearchContext searchContext = new SignificantTermsTestSearchContext(); diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index f3a78b65d78..ac90fabcb07 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -145,7 +145,7 @@ public class SearchSourceBuilderTests extends ESTestCase { bindMapperExtension(); } }, - new SearchModule(settings, namedWriteableRegistry) { + new SearchModule(settings, namedWriteableRegistry, false) { @Override protected void configureSearch() { // Skip me diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java index 9ebbb5b42e0..930a7b220e0 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java 
+++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java @@ -83,7 +83,7 @@ public class HighlightBuilderTests extends ESTestCase { @BeforeClass public static void init() { namedWriteableRegistry = new NamedWriteableRegistry(); - indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).getQueryParserRegistry(); + indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry, false).getQueryParserRegistry(); } @AfterClass diff --git a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java index f965d3ac5fd..7c3690dcf35 100644 --- a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java @@ -70,7 +70,7 @@ public class QueryRescoreBuilderTests extends ESTestCase { @BeforeClass public static void init() { namedWriteableRegistry = new NamedWriteableRegistry(); - indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).getQueryParserRegistry(); + indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry, false).getQueryParserRegistry(); } @AfterClass diff --git a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java index bdd5c76534c..b494fa4d1e6 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java @@ -106,7 +106,7 @@ public abstract class AbstractSortTestCase> extends EST }; namedWriteableRegistry = new NamedWriteableRegistry(); - indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).getQueryParserRegistry(); + indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry, 
false).getQueryParserRegistry(); } @AfterClass diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java index f31158ff34e..54b12216302 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java @@ -52,7 +52,7 @@ public class SortBuilderTests extends ESTestCase { @BeforeClass public static void init() { namedWriteableRegistry = new NamedWriteableRegistry(); - indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).getQueryParserRegistry(); + indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry, false).getQueryParserRegistry(); } @AfterClass diff --git a/core/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java b/core/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java index 1f4030f487c..cd6c34497f7 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java @@ -56,7 +56,7 @@ public abstract class AbstractSuggestionBuilderTestCase> scriptSettings.addAll(pluginsService.getPluginSettings()); scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED); SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, pluginsService.getPluginSettingsFilter()); - searchModule = new SearchModule(settings, namedWriteableRegistry) { + searchModule = new SearchModule(settings, namedWriteableRegistry, false) { @Override protected void configureSearch() { // Skip me diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 51d15e019a4..baad3ef6f04 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -631,7 +631,7 @@ public class ElasticsearchAssertions { registry = ESIntegTestCase.internalCluster().getInstance(NamedWriteableRegistry.class); } else { registry = new NamedWriteableRegistry(); - new SearchModule(Settings.EMPTY, registry); + new SearchModule(Settings.EMPTY, registry, false); } assertVersionSerializable(version, streamable, registry); } From a24d302e73251ae4fd2d41abff1fa2cab419be66 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 30 Jun 2016 14:47:38 -0400 Subject: [PATCH 35/36] Fix discovery-azure-classic plugin packaging test This commit fixes the discovery-azure-classing packaging test by fixing the expected name of the installed plugin. --- .../packaging/scripts/module_and_plugin_test_cases.bash | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash index c17df96937a..21882c0ad53 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash @@ -210,7 +210,7 @@ fi } @test "[$GROUP] install discovery-azure-classic plugin" { - install_and_check_plugin discovery azure azure-core-*.jar + install_and_check_plugin discovery azure-classic azure-core-*.jar } @test "[$GROUP] install discovery-ec2 plugin" { From d24cc65cad2f3152237df8b6c457a2d0a603f13a Mon Sep 17 00:00:00 2001 From: gfyoung Date: Thu, 30 Jun 2016 23:00:10 -0400 Subject: [PATCH 36/36] Raised IOException on deleteBlob (#18815) Raise IOException on deleteBlob if the blob doesn't exist This commit raises an IOException on BlobContainer#deleteBlob if the blob does not exist, in conformance with the 
BlobContainer interface contract. Each implementation of BlobContainer now conforms to this contract (file system, S3, Azure, HDFS). This commit also contains blob container tests for each of the repository implementations. Closes #18530 --- .../common/blobstore/fs/FsBlobContainer.java | 2 +- .../azure/blobstore/AzureBlobContainer.java | 10 + .../storage/AzureStorageServiceMock.java | 4 +- .../azure/AzureBlobStoreContainerTests.java | 47 ++++ .../repositories/hdfs/HdfsBlobContainer.java | 18 +- .../hdfs/HdfsBlobStoreContainerTests.java | 104 +++++++++ .../cloud/aws/blobstore/S3BlobContainer.java | 4 + .../cloud/aws/blobstore/MockAmazonS3.java | 200 ++++++++++++++++++ .../blobstore/S3BlobStoreContainerTests.java | 39 ++++ .../ESBlobStoreContainerTestCase.java | 16 ++ 10 files changed, 432 insertions(+), 12 deletions(-) create mode 100644 plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java create mode 100644 plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/blobstore/MockAmazonS3.java create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/blobstore/S3BlobStoreContainerTests.java diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index 822f8d1721a..67ce298becd 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -85,7 +85,7 @@ public class FsBlobContainer extends AbstractBlobContainer { @Override public void deleteBlob(String blobName) throws IOException { Path blobPath = path.resolve(blobName); - Files.deleteIfExists(blobPath); + Files.delete(blobPath); } @Override diff --git 
a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java index e6c3a469076..6117062fc29 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java @@ -70,6 +70,11 @@ public class AzureBlobContainer extends AbstractBlobContainer { @Override public InputStream readBlob(String blobName) throws IOException { logger.trace("readBlob({})", blobName); + + if (!blobExists(blobName)) { + throw new IOException("Blob [" + blobName + "] does not exist"); + } + try { return blobStore.getInputStream(blobStore.container(), buildKey(blobName)); } catch (StorageException e) { @@ -116,6 +121,11 @@ public class AzureBlobContainer extends AbstractBlobContainer { @Override public void deleteBlob(String blobName) throws IOException { logger.trace("deleteBlob({})", blobName); + + if (!blobExists(blobName)) { + throw new IOException("Blob [" + blobName + "] does not exist"); + } + try { blobStore.deleteBlob(blobStore.container(), buildKey(blobName)); } catch (URISyntaxException | StorageException e) { diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java index 8160c560325..506d574ea62 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java @@ -95,13 +95,13 @@ public class AzureStorageServiceMock extends AbstractLifecycleComponent blobsBuilder = MapBuilder.newMapBuilder(); for (String blobName : blobs.keySet()) { final String checkBlob; 
- if (keyPath != null) { + if (keyPath != null && !keyPath.isEmpty()) { // strip off key path from the beginning of the blob name checkBlob = blobName.replace(keyPath, ""); } else { checkBlob = blobName; } - if (startsWithIgnoreCase(checkBlob, prefix)) { + if (prefix == null || startsWithIgnoreCase(checkBlob, prefix)) { blobsBuilder.put(blobName, new PlainBlobMetaData(checkBlob, blobs.get(blobName).size())); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java new file mode 100644 index 00000000000..5b161613c9b --- /dev/null +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.azure; + +import com.microsoft.azure.storage.StorageException; +import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore; +import org.elasticsearch.cloud.azure.storage.AzureStorageServiceMock; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; +import org.elasticsearch.repositories.RepositoryName; +import org.elasticsearch.repositories.RepositorySettings; + +import java.io.IOException; +import java.net.URISyntaxException; + +public class AzureBlobStoreContainerTests extends ESBlobStoreContainerTestCase { + @Override + protected BlobStore newBlobStore() throws IOException { + try { + RepositoryName repositoryName = new RepositoryName("azure", "ittest"); + RepositorySettings repositorySettings = new RepositorySettings( + Settings.EMPTY, Settings.EMPTY); + AzureStorageServiceMock client = new AzureStorageServiceMock(Settings.EMPTY); + return new AzureBlobStore(repositoryName, Settings.EMPTY, repositorySettings, client); + } catch (URISyntaxException | StorageException e) { + throw new IOException(e); + } + } +} diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java index 6ba726e2b24..c8b3d9f7e1d 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java @@ -68,16 +68,16 @@ final class HdfsBlobContainer extends AbstractBlobContainer { @Override public void deleteBlob(String blobName) throws IOException { - try { - store.execute(new Operation() { - @Override - public Boolean run(FileContext fileContext) throws IOException { - return fileContext.delete(new Path(path, blobName), true); - } - }); - 
} catch (FileNotFoundException ok) { - // behaves like Files.deleteIfExists + if (!blobExists(blobName)) { + throw new IOException("Blob [" + blobName + "] does not exist"); } + + store.execute(new Operation() { + @Override + public Boolean run(FileContext fileContext) throws IOException { + return fileContext.delete(new Path(path, blobName), true); + } + }); } @Override diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java new file mode 100644 index 00000000000..a96a8183e58 --- /dev/null +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.AbstractFileSystem; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; + +import javax.security.auth.Subject; +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.net.URI; +import java.net.URISyntaxException; +import java.security.AccessController; +import java.security.Principal; +import java.security.PrivilegedAction; +import java.util.Collections; + +public class HdfsBlobStoreContainerTests extends ESBlobStoreContainerTestCase { + + @Override + protected BlobStore newBlobStore() throws IOException { + return AccessController.doPrivileged( + new PrivilegedAction() { + @Override + public HdfsBlobStore run() { + try { + FileContext fileContext = createContext(new URI("hdfs:///")); + return new HdfsBlobStore(fileContext, "temp", 1024); + } catch (IOException | URISyntaxException e) { + throw new RuntimeException(e); + } + } + }); + } + + public FileContext createContext(URI uri) { + // mirrors HdfsRepository.java behaviour + Configuration cfg = new Configuration(true); + cfg.setClassLoader(HdfsRepository.class.getClassLoader()); + cfg.reloadConfiguration(); + + Constructor ctor; + Subject subject; + + try { + Class clazz = Class.forName("org.apache.hadoop.security.User"); + ctor = clazz.getConstructor(String.class); + ctor.setAccessible(true); + } catch (ClassNotFoundException | NoSuchMethodException e) { + throw new RuntimeException(e); + } + + try { + Principal principal = (Principal) ctor.newInstance(System.getProperty("user.name")); + subject = new Subject(false, Collections.singleton(principal), + Collections.emptySet(), Collections.emptySet()); + } catch (InstantiationException | 
IllegalAccessException | InvocationTargetException e) { + throw new RuntimeException(e); + } + + // disable file system cache + cfg.setBoolean("fs.hdfs.impl.disable.cache", true); + + // set file system to TestingFs to avoid a bunch of security + // checks, similar to what is done in HdfsTests.java + cfg.set(String.format("fs.AbstractFileSystem.%s.impl", uri.getScheme()), + TestingFs.class.getName()); + + // create the FileContext with our user + return Subject.doAs(subject, new PrivilegedAction() { + @Override + public FileContext run() { + try { + TestingFs fs = (TestingFs) AbstractFileSystem.get(uri, cfg); + return FileContext.getFileContext(fs, cfg); + } catch (UnsupportedFileSystemException e) { + throw new RuntimeException(e); + } + } + }); + } +} diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java index 42df840ce40..5e014ab3ecd 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java @@ -108,6 +108,10 @@ public class S3BlobContainer extends AbstractBlobContainer { @Override public void deleteBlob(String blobName) throws IOException { + if (!blobExists(blobName)) { + throw new IOException("Blob [" + blobName + "] does not exist"); + } + try { blobStore.client().deleteObject(blobStore.bucket(), buildKey(blobName)); } catch (AmazonClientException e) { diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/blobstore/MockAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/blobstore/MockAmazonS3.java new file mode 100644 index 00000000000..8124f693943 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/blobstore/MockAmazonS3.java @@ -0,0 +1,200 @@ +/* + * Licensed to Elasticsearch 
under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cloud.aws.blobstore; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.AmazonServiceException; +import com.amazonaws.services.s3.AbstractAmazonS3; +import com.amazonaws.services.s3.model.AmazonS3Exception; +import com.amazonaws.services.s3.model.CopyObjectRequest; +import com.amazonaws.services.s3.model.CopyObjectResult; +import com.amazonaws.services.s3.model.DeleteObjectRequest; +import com.amazonaws.services.s3.model.GetObjectMetadataRequest; +import com.amazonaws.services.s3.model.GetObjectRequest; +import com.amazonaws.services.s3.model.ListObjectsRequest; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.PutObjectResult; +import com.amazonaws.services.s3.model.S3Object; +import com.amazonaws.services.s3.model.S3ObjectInputStream; +import com.amazonaws.services.s3.model.S3ObjectSummary; +import com.amazonaws.util.Base64; + +import java.io.IOException; +import java.io.InputStream; +import java.security.DigestInputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import 
java.util.concurrent.ConcurrentHashMap; + +class MockAmazonS3 extends AbstractAmazonS3 { + + private Map blobs = new ConcurrentHashMap<>(); + + // in ESBlobStoreContainerTestCase.java, the maximum + // length of the input data is 100 bytes + private byte[] byteCounter = new byte[100]; + + @Override + public boolean doesBucketExist(String bucket) { + return true; + } + + @Override + public ObjectMetadata getObjectMetadata( + GetObjectMetadataRequest getObjectMetadataRequest) + throws AmazonClientException, AmazonServiceException { + String blobName = getObjectMetadataRequest.getKey(); + + if (!blobs.containsKey(blobName)) { + throw new AmazonS3Exception("[" + blobName + "] does not exist."); + } + + return new ObjectMetadata(); // nothing is done with it + } + + @Override + public PutObjectResult putObject(PutObjectRequest putObjectRequest) + throws AmazonClientException, AmazonServiceException { + String blobName = putObjectRequest.getKey(); + DigestInputStream stream = (DigestInputStream) putObjectRequest.getInputStream(); + + if (blobs.containsKey(blobName)) { + throw new AmazonS3Exception("[" + blobName + "] already exists."); + } + + blobs.put(blobName, stream); + + // input and output md5 hashes need to match to avoid an exception + String md5 = Base64.encodeAsString(stream.getMessageDigest().digest()); + PutObjectResult result = new PutObjectResult(); + result.setContentMd5(md5); + + return result; + } + + @Override + public S3Object getObject(GetObjectRequest getObjectRequest) + throws AmazonClientException, AmazonServiceException { + // in ESBlobStoreContainerTestCase.java, the prefix is empty, + // so the key and blobName are equivalent to each other + String blobName = getObjectRequest.getKey(); + + if (!blobs.containsKey(blobName)) { + throw new AmazonS3Exception("[" + blobName + "] does not exist."); + } + + // the HTTP request attribute is irrelevant for reading + S3ObjectInputStream stream = new S3ObjectInputStream( + blobs.get(blobName), null, 
false); + S3Object s3Object = new S3Object(); + s3Object.setObjectContent(stream); + return s3Object; + } + + @Override + public ObjectListing listObjects(ListObjectsRequest listObjectsRequest) + throws AmazonClientException, AmazonServiceException { + MockObjectListing list = new MockObjectListing(); + list.setTruncated(false); + + String blobName; + String prefix = listObjectsRequest.getPrefix(); + + ArrayList mockObjectSummaries = new ArrayList<>(); + + for (Map.Entry blob : blobs.entrySet()) { + blobName = blob.getKey(); + S3ObjectSummary objectSummary = new S3ObjectSummary(); + + if (prefix.isEmpty() || blobName.startsWith(prefix)) { + objectSummary.setKey(blobName); + + try { + objectSummary.setSize(getSize(blob.getValue())); + } catch (IOException e) { + throw new AmazonS3Exception("Object listing " + + "failed for blob [" + blob.getKey() + "]"); + } + + mockObjectSummaries.add(objectSummary); + } + } + + list.setObjectSummaries(mockObjectSummaries); + return list; + } + + @Override + public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest) + throws AmazonClientException, AmazonServiceException { + String sourceBlobName = copyObjectRequest.getSourceKey(); + String targetBlobName = copyObjectRequest.getDestinationKey(); + + if (!blobs.containsKey(sourceBlobName)) { + throw new AmazonS3Exception("Source blob [" + + sourceBlobName + "] does not exist."); + } + + if (blobs.containsKey(targetBlobName)) { + throw new AmazonS3Exception("Target blob [" + + targetBlobName + "] already exists."); + } + + blobs.put(targetBlobName, blobs.get(sourceBlobName)); + return new CopyObjectResult(); // nothing is done with it + } + + @Override + public void deleteObject(DeleteObjectRequest deleteObjectRequest) + throws AmazonClientException, AmazonServiceException { + String blobName = deleteObjectRequest.getKey(); + + if (!blobs.containsKey(blobName)) { + throw new AmazonS3Exception("[" + blobName + "] does not exist."); + } + + blobs.remove(blobName); + } + + 
private int getSize(InputStream stream) throws IOException { + int size = stream.read(byteCounter); + stream.reset(); // in case we ever need the size again + return size; + } + + private class MockObjectListing extends ObjectListing { + // the objectSummaries attribute in ObjectListing.java + // is read-only, but we need to be able to write to it, + // so we create a mock of it to work around this + private List mockObjectSummaries; + + @Override + public List getObjectSummaries() { + return mockObjectSummaries; + } + + private void setObjectSummaries(List objectSummaries) { + mockObjectSummaries = objectSummaries; + } + } +} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/blobstore/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/blobstore/S3BlobStoreContainerTests.java new file mode 100644 index 00000000000..bca1c1d8a18 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/blobstore/S3BlobStoreContainerTests.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.aws.blobstore; + +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; + +import java.io.IOException; +import java.util.Locale; + +public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase { + protected BlobStore newBlobStore() throws IOException { + MockAmazonS3 client = new MockAmazonS3(); + String bucket = randomAsciiOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + + return new S3BlobStore(Settings.EMPTY, client, bucket, null, false, + new ByteSizeValue(10, ByteSizeUnit.MB), 5, "public-read-write", "standard"); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java index 6ff0b71cdcc..67ad0eb7358 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java @@ -112,6 +112,22 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase { } } + public void testDeleteBlob() throws IOException { + try (final BlobStore store = newBlobStore()) { + final String blobName = "foobar"; + final BlobContainer container = store.blobContainer(new BlobPath()); + expectThrows(IOException.class, () -> container.deleteBlob(blobName)); + + byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); + final BytesArray bytesArray = new BytesArray(data); + container.writeBlob(blobName, bytesArray); + container.deleteBlob(blobName); // should not raise + + // blob deleted, so should raise again + expectThrows(IOException.class, () -> 
container.deleteBlob(blobName)); + } + } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/15579") public void testOverwriteFails() throws IOException { try (final BlobStore store = newBlobStore()) {